-rw-r--r--.mailmap6
-rw-r--r--Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update14
-rw-r--r--Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml3
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/xlnx,versal-ddrmc-edac.yaml57
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml2
-rw-r--r--Documentation/filesystems/files.rst53
-rw-r--r--Documentation/filesystems/fscrypt.rst121
-rw-r--r--Documentation/filesystems/nfs/exporting.rst7
-rw-r--r--Documentation/filesystems/porting.rst7
-rw-r--r--Documentation/netlink/specs/devlink.yaml18
-rw-r--r--Documentation/netlink/specs/nfsd.yaml89
-rw-r--r--Documentation/networking/representors.rst8
-rw-r--r--Documentation/rust/general-information.rst2
-rw-r--r--Documentation/trace/fprobe.rst8
-rw-r--r--MAINTAINERS44
-rw-r--r--Makefile4
-rw-r--r--arch/arc/kernel/troubleshoot.c6
-rw-r--r--arch/arm/boot/dts/rockchip/rk3128.dtsi18
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi6
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-l4.dtsi2
-rw-r--r--arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi6
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c60
-rw-r--r--arch/arm/mach-omap1/timer32k.c14
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dts32
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi32
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts18
-rw-r--r--arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi10
-rw-r--r--arch/arm64/include/asm/kvm_arm.h4
-rw-r--r--arch/arm64/kvm/arch_timer.c13
-rw-r--r--arch/arm64/kvm/emulate-nested.c2
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c44
-rw-r--r--arch/arm64/kvm/pmu.c4
-rw-r--r--arch/arm64/kvm/sys_regs.c4
-rw-r--r--arch/loongarch/include/asm/io.h5
-rw-r--r--arch/loongarch/include/asm/linkage.h8
-rw-r--r--arch/loongarch/include/asm/pgtable-bits.h4
-rw-r--r--arch/loongarch/kernel/entry.S4
-rw-r--r--arch/loongarch/kernel/genex.S16
-rw-r--r--arch/loongarch/kernel/setup.c10
-rw-r--r--arch/loongarch/mm/init.c9
-rw-r--r--arch/loongarch/mm/tlbex.S36
-rw-r--r--arch/mips/kvm/mmu.c3
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/lib/qspinlock.c3
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c9
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/mm/pgtable.c32
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/riscv/Kconfig3
-rw-r--r--arch/riscv/Kconfig.errata1
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi2
-rw-r--r--arch/riscv/boot/dts/thead/th1520.dtsi1
-rw-r--r--arch/riscv/mm/fault.c2
-rw-r--r--arch/riscv/mm/hugetlbpage.c19
-rw-r--r--arch/s390/boot/vmem.c7
-rw-r--r--arch/s390/hypfs/inode.c4
-rw-r--r--arch/s390/kvm/interrupt.c16
-rw-r--r--arch/s390/pci/pci_dma.c15
-rw-r--r--arch/sparc/lib/checksum_32.S2
-rw-r--r--arch/x86/boot/compressed/sev.c10
-rw-r--r--arch/x86/include/asm/fpu/api.h3
-rw-r--r--arch/x86/include/asm/i8259.h2
-rw-r--r--arch/x86/include/asm/intel-family.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h1
-rw-r--r--arch/x86/include/asm/svm.h1
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c3
-rw-r--r--arch/x86/kernel/fpu/core.c5
-rw-r--r--arch/x86/kernel/fpu/xstate.c12
-rw-r--r--arch/x86/kernel/fpu/xstate.h3
-rw-r--r--arch/x86/kernel/i8259.c38
-rw-r--r--arch/x86/kernel/sev-shared.c53
-rw-r--r--arch/x86/kernel/sev.c30
-rw-r--r--arch/x86/kernel/tsc_sync.c10
-rw-r--r--arch/x86/kvm/cpuid.c8
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/pmu.c27
-rw-r--r--arch/x86/kvm/pmu.h6
-rw-r--r--arch/x86/kvm/svm/avic.c5
-rw-r--r--arch/x86/kvm/svm/nested.c3
-rw-r--r--arch/x86/kvm/svm/pmu.c2
-rw-r--r--arch/x86/kvm/svm/svm.c5
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c4
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/lib/copy_mc.c8
-rw-r--r--block/bdev.c65
-rw-r--r--block/blk-throttle.c6
-rw-r--r--block/disk-events.c18
-rw-r--r--block/fops.c44
-rw-r--r--block/genhd.c19
-rw-r--r--block/ioctl.c11
-rw-r--r--block/partitions/core.c43
-rw-r--r--block/sed-opal.c7
-rw-r--r--crypto/asymmetric_keys/public_key.c5
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c11
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h1
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c9
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h5
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h8
-rw-r--r--drivers/accel/ivpu/ivpu_hw_37xx.c12
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c1
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c9
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c3
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/irq.c7
-rw-r--r--drivers/acpi/nfit/core.c22
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/libata-scsi.c5
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/ataflop.c4
-rw-r--r--drivers/block/drbd/drbd_int.h2
-rw-r--r--drivers/block/drbd/drbd_nl.c65
-rw-r--r--drivers/block/floppy.c4
-rw-r--r--drivers/block/pktcdvd.c76
-rw-r--r--drivers/block/rnbd/rnbd-srv.c27
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkback/common.h4
-rw-r--r--drivers/block/xen-blkback/xenbus.c40
-rw-r--r--drivers/block/zram/zram_drv.c31
-rw-r--r--drivers/block/zram/zram_drv.h2
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/hci_vhci.c3
-rw-r--r--drivers/cache/Kconfig2
-rw-r--r--drivers/char/sonypi.c2
-rw-r--r--drivers/clk/clk.c21
-rw-r--r--drivers/clk/socfpga/clk-gate.c27
-rw-r--r--drivers/clk/stm32/clk-stm32-core.c2
-rw-r--r--drivers/clk/ti/clk-44xx.c5
-rw-r--r--drivers/clk/ti/clk-54xx.c4
-rw-r--r--drivers/connector/cn_proc.c2
-rw-r--r--drivers/crypto/virtio/virtio_crypto_common.h3
-rw-r--r--drivers/crypto/virtio/virtio_crypto_core.c14
-rw-r--r--drivers/edac/Kconfig12
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/versal_edac.c1069
-rw-r--r--drivers/firewire/sbp2.c1
-rw-r--r--drivers/firmware/efi/efi.c8
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c7
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.h2
-rw-r--r--drivers/firmware/efi/unaccepted_memory.c64
-rw-r--r--drivers/firmware/imx/imx-dsp.c2
-rw-r--r--drivers/fpga/tests/Kconfig4
-rw-r--r--drivers/fpga/tests/fpga-region-test.c2
-rw-r--r--drivers/gpio/gpio-vf610.c7
-rw-r--r--drivers/gpio/gpiolib-acpi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c14
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c6
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c24
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c9
-rw-r--r--drivers/gpu/drm/logicvc/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c14
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c29
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c35
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c8
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c9
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c2
-rw-r--r--drivers/iio/adc/exynos_adc.c24
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c39
-rw-r--r--drivers/iio/adc/xilinx-xadc.h2
-rw-r--r--drivers/iio/afe/iio-rescale.c19
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c4
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c2
-rw-r--r--drivers/md/bcache/Kconfig10
-rw-r--r--drivers/md/bcache/Makefile4
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/super.c96
-rw-r--r--drivers/md/bcache/util.h3
-rw-r--r--drivers/md/dm.c20
-rw-r--r--drivers/md/md.c23
-rw-r--r--drivers/md/md.h4
-rw-r--r--drivers/misc/fastrpc.c34
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmvmc.c2
-rw-r--r--drivers/mmc/core/block.c31
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sdio.c8
-rw-r--r--drivers/mmc/host/mtk-sd.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c104
-rw-r--r--drivers/mmc/host/sdhci-sprd.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c51
-rw-r--r--drivers/mtd/maps/physmap-core.c11
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c16
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c23
-rw-r--r--drivers/mtd/nand/raw/nand_base.c3
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c3
-rw-r--r--drivers/mtd/nand/raw/nand_onfi.c3
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c9
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c2
-rw-r--r--drivers/mtd/nand/spi/micron.c2
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/dsa/bcm_sf2.c24
-rw-r--r--drivers/net/ethernet/adi/adin1110.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c36
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c22
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c35
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c7
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c6
-rw-r--r--drivers/net/ethernet/sfc/tc.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/ti/Kconfig5
-rw-r--r--drivers/net/ethernet/ti/Makefile7
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c4
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_stats.c9
-rw-r--r--drivers/net/ethernet/ti/k3-cppi-desc-pool.c10
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c2
-rw-r--r--drivers/net/gtp.c5
-rw-r--r--drivers/net/ieee802154/adf7242.c5
-rw-r--r--drivers/net/mdio/mdio-mux.c47
-rw-r--r--drivers/net/phy/bcm7xxx.c3
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/r8152.c303
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c21
-rw-r--r--drivers/nvme/host/auth.c4
-rw-r--r--drivers/nvme/host/ioctl.c10
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/rdma.c3
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c9
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c20
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/tcp.c7
-rw-r--r--drivers/nvmem/imx-ocotp.c6
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-apq8064-sata.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c7
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c24
-rw-r--r--drivers/phy/realtek/Kconfig5
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c10
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c10
-rw-r--r--drivers/pinctrl/core.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c17
-rw-r--r--drivers/platform/mellanox/mlxbf-tmfifo.c21
-rw-r--r--drivers/platform/surface/surface_platform_profile.c3
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c73
-rw-r--r--drivers/platform/x86/apple-gmux.c14
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c15
-rw-r--r--drivers/platform/x86/asus-wmi.h2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c8
-rw-r--r--drivers/platform/x86/mlx-platform.c5
-rw-r--r--drivers/platform/x86/msi-ec.c3
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/s390/block/dasd.c12
-rw-r--r--drivers/s390/block/dasd_genhd.c45
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c2
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/sd.c39
-rw-r--r--drivers/soc/renesas/Kconfig10
-rw-r--r--drivers/spi/spi-npcm-fiu.c5
-rw-r--r--drivers/target/target_core_iblock.c19
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_pscsi.c26
-rw-r--r--drivers/target/target_core_pscsi.h2
-rw-r--r--drivers/thunderbolt/tb.c10
-rw-r--r--drivers/tty/tty_io.c10
-rw-r--r--drivers/usb/core/devio.c26
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/vdpa/mlx5/net/debug.c5
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c70
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.h11
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c5
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c4
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c2
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c2
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c4
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/virtio/virtio_balloon.c6
-rw-r--r--drivers/virtio/virtio_mmio.c19
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c2
-rw-r--r--fs/9p/vfs_inode.c6
-rw-r--r--fs/9p/vfs_inode_dotl.c16
-rw-r--r--fs/9p/xattr.c8
-rw-r--r--fs/9p/xattr.h2
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/Makefile1
-rw-r--r--fs/adfs/inode.c13
-rw-r--r--fs/affs/amigaffs.c4
-rw-r--r--fs/affs/inode.c17
-rw-r--r--fs/afs/dynroot.c2
-rw-r--r--fs/afs/inode.c8
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/write.c2
-rw-r--r--fs/afs/xattr.c2
-rw-r--r--fs/attr.c4
-rw-r--r--fs/autofs/autofs_i.h20
-rw-r--r--fs/autofs/init.c9
-rw-r--r--fs/autofs/inode.c437
-rw-r--r--fs/autofs/root.c6
-rw-r--r--fs/bad_inode.c2
-rw-r--r--fs/bcachefs/Kconfig85
-rw-r--r--fs/bcachefs/Makefile88
-rw-r--r--fs/bcachefs/acl.c463
-rw-r--r--fs/bcachefs/acl.h60
-rw-r--r--fs/bcachefs/alloc_background.c2146
-rw-r--r--fs/bcachefs/alloc_background.h258
-rw-r--r--fs/bcachefs/alloc_foreground.c1576
-rw-r--r--fs/bcachefs/alloc_foreground.h224
-rw-r--r--fs/bcachefs/alloc_types.h126
-rw-r--r--fs/bcachefs/backpointers.c868
-rw-r--r--fs/bcachefs/backpointers.h131
-rw-r--r--fs/bcachefs/bbpos.h48
-rw-r--r--fs/bcachefs/bcachefs.h1156
-rw-r--r--fs/bcachefs/bcachefs_format.h2413
-rw-r--r--fs/bcachefs/bcachefs_ioctl.h368
-rw-r--r--fs/bcachefs/bkey.c1120
-rw-r--r--fs/bcachefs/bkey.h782
-rw-r--r--fs/bcachefs/bkey_buf.h61
-rw-r--r--fs/bcachefs/bkey_cmp.h129
-rw-r--r--fs/bcachefs/bkey_methods.c458
-rw-r--r--fs/bcachefs/bkey_methods.h188
-rw-r--r--fs/bcachefs/bkey_sort.c201
-rw-r--r--fs/bcachefs/bkey_sort.h54
-rw-r--r--fs/bcachefs/bset.c1592
-rw-r--r--fs/bcachefs/bset.h541
-rw-r--r--fs/bcachefs/btree_cache.c1202
-rw-r--r--fs/bcachefs/btree_cache.h130
-rw-r--r--fs/bcachefs/btree_gc.c2111
-rw-r--r--fs/bcachefs/btree_gc.h114
-rw-r--r--fs/bcachefs/btree_io.c2223
-rw-r--r--fs/bcachefs/btree_io.h228
-rw-r--r--fs/bcachefs/btree_iter.c3215
-rw-r--r--fs/bcachefs/btree_iter.h939
-rw-r--r--fs/bcachefs/btree_journal_iter.c531
-rw-r--r--fs/bcachefs/btree_journal_iter.h57
-rw-r--r--fs/bcachefs/btree_key_cache.c1072
-rw-r--r--fs/bcachefs/btree_key_cache.h48
-rw-r--r--fs/bcachefs/btree_locking.c791
-rw-r--r--fs/bcachefs/btree_locking.h423
-rw-r--r--fs/bcachefs/btree_trans_commit.c1150
-rw-r--r--fs/bcachefs/btree_types.h739
-rw-r--r--fs/bcachefs/btree_update.c933
-rw-r--r--fs/bcachefs/btree_update.h340
-rw-r--r--fs/bcachefs/btree_update_interior.c2480
-rw-r--r--fs/bcachefs/btree_update_interior.h337
-rw-r--r--fs/bcachefs/btree_write_buffer.c375
-rw-r--r--fs/bcachefs/btree_write_buffer.h14
-rw-r--r--fs/bcachefs/btree_write_buffer_types.h44
-rw-r--r--fs/bcachefs/buckets.c2106
-rw-r--r--fs/bcachefs/buckets.h443
-rw-r--r--fs/bcachefs/buckets_types.h92
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal.c166
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal.h15
-rw-r--r--fs/bcachefs/buckets_waiting_for_journal_types.h23
-rw-r--r--fs/bcachefs/chardev.c784
-rw-r--r--fs/bcachefs/chardev.h31
-rw-r--r--fs/bcachefs/checksum.c804
-rw-r--r--fs/bcachefs/checksum.h213
-rw-r--r--fs/bcachefs/clock.c193
-rw-r--r--fs/bcachefs/clock.h38
-rw-r--r--fs/bcachefs/clock_types.h37
-rw-r--r--fs/bcachefs/compress.c710
-rw-r--r--fs/bcachefs/compress.h55
-rw-r--r--fs/bcachefs/counters.c107
-rw-r--r--fs/bcachefs/counters.h17
-rw-r--r--fs/bcachefs/darray.h87
-rw-r--r--fs/bcachefs/data_update.c558
-rw-r--r--fs/bcachefs/data_update.h43
-rw-r--r--fs/bcachefs/debug.c954
-rw-r--r--fs/bcachefs/debug.h32
-rw-r--r--fs/bcachefs/dirent.c587
-rw-r--r--fs/bcachefs/dirent.h70
-rw-r--r--fs/bcachefs/disk_groups.c550
-rw-r--r--fs/bcachefs/disk_groups.h106
-rw-r--r--fs/bcachefs/ec.c1966
-rw-r--r--fs/bcachefs/ec.h260
-rw-r--r--fs/bcachefs/ec_types.h41
-rw-r--r--fs/bcachefs/errcode.c68
-rw-r--r--fs/bcachefs/errcode.h265
-rw-r--r--fs/bcachefs/error.c293
-rw-r--r--fs/bcachefs/error.h206
-rw-r--r--fs/bcachefs/extent_update.c173
-rw-r--r--fs/bcachefs/extent_update.h12
-rw-r--r--fs/bcachefs/extents.c1403
-rw-r--r--fs/bcachefs/extents.h758
-rw-r--r--fs/bcachefs/extents_types.h40
-rw-r--r--fs/bcachefs/eytzinger.h281
-rw-r--r--fs/bcachefs/fifo.h127
-rw-r--r--fs/bcachefs/fs-common.c501
-rw-r--r--fs/bcachefs/fs-common.h43
-rw-r--r--fs/bcachefs/fs-io-buffered.c1093
-rw-r--r--fs/bcachefs/fs-io-buffered.h27
-rw-r--r--fs/bcachefs/fs-io-direct.c679
-rw-r--r--fs/bcachefs/fs-io-direct.h16
-rw-r--r--fs/bcachefs/fs-io-pagecache.c791
-rw-r--r--fs/bcachefs/fs-io-pagecache.h176
-rw-r--r--fs/bcachefs/fs-io.c1072
-rw-r--r--fs/bcachefs/fs-io.h184
-rw-r--r--fs/bcachefs/fs-ioctl.c572
-rw-r--r--fs/bcachefs/fs-ioctl.h81
-rw-r--r--fs/bcachefs/fs.c1980
-rw-r--r--fs/bcachefs/fs.h209
-rw-r--r--fs/bcachefs/fsck.c2417
-rw-r--r--fs/bcachefs/fsck.h14
-rw-r--r--fs/bcachefs/inode.c1133
-rw-r--r--fs/bcachefs/inode.h207
-rw-r--r--fs/bcachefs/io_misc.c515
-rw-r--r--fs/bcachefs/io_misc.h34
-rw-r--r--fs/bcachefs/io_read.c1210
-rw-r--r--fs/bcachefs/io_read.h158
-rw-r--r--fs/bcachefs/io_write.c1671
-rw-r--r--fs/bcachefs/io_write.h110
-rw-r--r--fs/bcachefs/io_write_types.h96
-rw-r--r--fs/bcachefs/journal.c1449
-rw-r--r--fs/bcachefs/journal.h548
-rw-r--r--fs/bcachefs/journal_io.c1894
-rw-r--r--fs/bcachefs/journal_io.h65
-rw-r--r--fs/bcachefs/journal_reclaim.c876
-rw-r--r--fs/bcachefs/journal_reclaim.h87
-rw-r--r--fs/bcachefs/journal_sb.c219
-rw-r--r--fs/bcachefs/journal_sb.h24
-rw-r--r--fs/bcachefs/journal_seq_blacklist.c320
-rw-r--r--fs/bcachefs/journal_seq_blacklist.h22
-rw-r--r--fs/bcachefs/journal_types.h345
-rw-r--r--fs/bcachefs/keylist.c52
-rw-r--r--fs/bcachefs/keylist.h74
-rw-r--r--fs/bcachefs/keylist_types.h16
-rw-r--r--fs/bcachefs/logged_ops.c112
-rw-r--r--fs/bcachefs/logged_ops.h20
-rw-r--r--fs/bcachefs/lru.c162
-rw-r--r--fs/bcachefs/lru.h69
-rw-r--r--fs/bcachefs/mean_and_variance.c159
-rw-r--r--fs/bcachefs/mean_and_variance.h198
-rw-r--r--fs/bcachefs/mean_and_variance_test.c240
-rw-r--r--fs/bcachefs/migrate.c179
-rw-r--r--fs/bcachefs/migrate.h7
-rw-r--r--fs/bcachefs/move.c1159
-rw-r--r--fs/bcachefs/move.h96
-rw-r--r--fs/bcachefs/move_types.h36
-rw-r--r--fs/bcachefs/movinggc.c414
-rw-r--r--fs/bcachefs/movinggc.h12
-rw-r--r--fs/bcachefs/nocow_locking.c144
-rw-r--r--fs/bcachefs/nocow_locking.h50
-rw-r--r--fs/bcachefs/nocow_locking_types.h20
-rw-r--r--fs/bcachefs/opts.c605
-rw-r--r--fs/bcachefs/opts.h564
-rw-r--r--fs/bcachefs/printbuf.c425
-rw-r--r--fs/bcachefs/printbuf.h284
-rw-r--r--fs/bcachefs/quota.c978
-rw-r--r--fs/bcachefs/quota.h74
-rw-r--r--fs/bcachefs/quota_types.h43
-rw-r--r--fs/bcachefs/rebalance.c366
-rw-r--r--fs/bcachefs/rebalance.h28
-rw-r--r--fs/bcachefs/rebalance_types.h26
-rw-r--r--fs/bcachefs/recovery.c1049
-rw-r--r--fs/bcachefs/recovery.h33
-rw-r--r--fs/bcachefs/recovery_types.h49
-rw-r--r--fs/bcachefs/reflink.c405
-rw-r--r--fs/bcachefs/reflink.h81
-rw-r--r--fs/bcachefs/replicas.c1058
-rw-r--r--fs/bcachefs/replicas.h91
-rw-r--r--fs/bcachefs/replicas_types.h27
-rw-r--r--fs/bcachefs/sb-clean.c395
-rw-r--r--fs/bcachefs/sb-clean.h16
-rw-r--r--fs/bcachefs/sb-members.c339
-rw-r--r--fs/bcachefs/sb-members.h182
-rw-r--r--fs/bcachefs/seqmutex.h48
-rw-r--r--fs/bcachefs/siphash.c173
-rw-r--r--fs/bcachefs/siphash.h87
-rw-r--r--fs/bcachefs/six.c913
-rw-r--r--fs/bcachefs/six.h393
-rw-r--r--fs/bcachefs/snapshot.c1689
-rw-r--r--fs/bcachefs/snapshot.h270
-rw-r--r--fs/bcachefs/str_hash.h370
-rw-r--r--fs/bcachefs/subvolume.c450
-rw-r--r--fs/bcachefs/subvolume.h35
-rw-r--r--fs/bcachefs/subvolume_types.h31
-rw-r--r--fs/bcachefs/super-io.c1258
-rw-r--r--fs/bcachefs/super-io.h124
-rw-r--r--fs/bcachefs/super.c2022
-rw-r--r--fs/bcachefs/super.h52
-rw-r--r--fs/bcachefs/super_types.h52
-rw-r--r--fs/bcachefs/sysfs.c1031
-rw-r--r--fs/bcachefs/sysfs.h48
-rw-r--r--fs/bcachefs/tests.c919
-rw-r--r--fs/bcachefs/tests.h15
-rw-r--r--fs/bcachefs/trace.c16
-rw-r--r--fs/bcachefs/trace.h1284
-rw-r--r--fs/bcachefs/two_state_shared_lock.c8
-rw-r--r--fs/bcachefs/two_state_shared_lock.h59
-rw-r--r--fs/bcachefs/util.c1141
-rw-r--r--fs/bcachefs/util.h852
-rw-r--r--fs/bcachefs/varint.c129
-rw-r--r--fs/bcachefs/varint.h11
-rw-r--r--fs/bcachefs/vstructs.h63
-rw-r--r--fs/bcachefs/xattr.c651
-rw-r--r--fs/bcachefs/xattr.h50
-rw-r--r--fs/befs/linuxvfs.c10
-rw-r--r--fs/bfs/dir.c9
-rw-r--r--fs/bfs/inode.c12
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/btrfs/Kconfig21
-rw-r--r--fs/btrfs/Makefile3
-rw-r--r--fs/btrfs/accessors.h16
-rw-r--r--fs/btrfs/async-thread.c12
-rw-r--r--fs/btrfs/async-thread.h6
-rw-r--r--fs/btrfs/backref.c19
-rw-r--r--fs/btrfs/backref.h13
-rw-r--r--fs/btrfs/bio.c47
-rw-r--r--fs/btrfs/block-group.c174
-rw-r--r--fs/btrfs/block-rsv.c24
-rw-r--r--fs/btrfs/btrfs_inode.h80
-rw-r--r--fs/btrfs/check-integrity.c2871
-rw-r--r--fs/btrfs/check-integrity.h20
-rw-r--r--fs/btrfs/compression.c6
-rw-r--r--fs/btrfs/ctree.c361
-rw-r--r--fs/btrfs/ctree.h145
-rw-r--r--fs/btrfs/defrag.c152
-rw-r--r--fs/btrfs/defrag.h2
-rw-r--r--fs/btrfs/delalloc-space.c6
-rw-r--r--fs/btrfs/delayed-inode.c47
-rw-r--r--fs/btrfs/delayed-inode.h1
-rw-r--r--fs/btrfs/delayed-ref.c199
-rw-r--r--fs/btrfs/delayed-ref.h70
-rw-r--r--fs/btrfs/dev-replace.c17
-rw-r--r--fs/btrfs/dir-item.c8
-rw-r--r--fs/btrfs/dir-item.h9
-rw-r--r--fs/btrfs/disk-io.c142
-rw-r--r--fs/btrfs/disk-io.h3
-rw-r--r--fs/btrfs/extent-io-tree.c272
-rw-r--r--fs/btrfs/extent-io-tree.h7
-rw-r--r--fs/btrfs/extent-tree.c536
-rw-r--r--fs/btrfs/extent-tree.h15
-rw-r--r--fs/btrfs/extent_io.c39
-rw-r--r--fs/btrfs/extent_io.h4
-rw-r--r--fs/btrfs/file-item.c17
-rw-r--r--fs/btrfs/file.c79
-rw-r--r--fs/btrfs/free-space-cache.c28
-rw-r--r--fs/btrfs/free-space-tree.c17
-rw-r--r--fs/btrfs/fs.h69
-rw-r--r--fs/btrfs/inode-item.c21
-rw-r--r--fs/btrfs/inode-item.h8
-rw-r--r--fs/btrfs/inode.c226
-rw-r--r--fs/btrfs/ioctl.c41
-rw-r--r--fs/btrfs/locking.c20
-rw-r--r--fs/btrfs/messages.c32
-rw-r--r--fs/btrfs/messages.h14
-rw-r--r--fs/btrfs/ordered-data.c127
-rw-r--r--fs/btrfs/ordered-data.h17
-rw-r--r--fs/btrfs/print-tree.c35
-rw-r--r--fs/btrfs/props.c1
-rw-r--r--fs/btrfs/qgroup.c872
-rw-r--r--fs/btrfs/qgroup.h149
-rw-r--r--fs/btrfs/raid-stripe-tree.c274
-rw-r--r--fs/btrfs/raid-stripe-tree.h50
-rw-r--r--fs/btrfs/ref-verify.c9
-rw-r--r--fs/btrfs/reflink.c5
-rw-r--r--fs/btrfs/relocation.c215
-rw-r--r--fs/btrfs/relocation.h9
-rw-r--r--fs/btrfs/root-tree.c12
-rw-r--r--fs/btrfs/root-tree.h8
-rw-r--r--fs/btrfs/scrub.c78
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/space-info.c64
-rw-r--r--fs/btrfs/space-info.h3
-rw-r--r--fs/btrfs/super.c87
-rw-r--r--fs/btrfs/sysfs.c53
-rw-r--r--fs/btrfs/tests/extent-buffer-tests.c6
-rw-r--r--fs/btrfs/tests/inode-tests.c12
-rw-r--r--fs/btrfs/transaction.c232
-rw-r--r--fs/btrfs/transaction.h20
-rw-r--r--fs/btrfs/tree-checker.c48
-rw-r--r--fs/btrfs/tree-log.c93
-rw-r--r--fs/btrfs/ulist.c3
-rw-r--r--fs/btrfs/uuid-tree.c6
-rw-r--r--fs/btrfs/verity.c4
-rw-r--r--fs/btrfs/volumes.c526
-rw-r--r--fs/btrfs/volumes.h45
-rw-r--r--fs/btrfs/xattr.c14
-rw-r--r--fs/btrfs/xattr.h2
-rw-r--r--fs/btrfs/zoned.c452
-rw-r--r--fs/btrfs/zstd.c11
-rw-r--r--fs/ceph/addr.c10
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/ceph/crypto.c1
-rw-r--r--fs/ceph/file.c2
-rw-r--r--fs/ceph/inode.c64
-rw-r--r--fs/ceph/mds_client.c10
-rw-r--r--fs/ceph/snap.c4
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/ceph/xattr.c2
-rw-r--r--fs/char_dev.c2
-rw-r--r--fs/coda/coda_linux.c6
-rw-r--r--fs/coda/dir.c2
-rw-r--r--fs/coda/file.c2
-rw-r--r--fs/configfs/inode.c8
-rw-r--r--fs/cramfs/inode.c6
-rw-r--r--fs/crypto/bio.c39
-rw-r--r--fs/crypto/crypto.c163
-rw-r--r--fs/crypto/fname.c6
-rw-r--r--fs/crypto/fscrypt_private.h164
-rw-r--r--fs/crypto/hooks.c4
-rw-r--r--fs/crypto/inline_crypt.c32
-rw-r--r--fs/crypto/keyring.c82
-rw-r--r--fs/crypto/keysetup.c62
-rw-r--r--fs/crypto/keysetup_v1.c20
-rw-r--r--fs/crypto/policy.c83
-rw-r--r--fs/dcache.c12
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/devpts/inode.c6
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h2
-rw-r--r--fs/ecryptfs/inode.c2
-rw-r--r--fs/efivarfs/file.c2
-rw-r--r--fs/efivarfs/inode.c2
-rw-r--r--fs/efs/inode.c5
-rw-r--r--fs/erofs/data.c4
-rw-r--r--fs/erofs/inode.c3
-rw-r--r--fs/erofs/internal.h2
-rw-r--r--fs/erofs/super.c20
-rw-r--r--fs/erofs/xattr.c2
-rw-r--r--fs/erofs/xattr.h4
-rw-r--r--fs/exfat/exfat_fs.h1
-rw-r--r--fs/exfat/file.c7
-rw-r--r--fs/exfat/inode.c31
-rw-r--r--fs/exfat/misc.c8
-rw-r--r--fs/exfat/namei.c31
-rw-r--r--fs/exfat/super.c4
-rw-r--r--fs/ext2/dir.c6
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c13
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext2/xattr.c4
-rw-r--r--fs/ext2/xattr.h2
-rw-r--r--fs/ext4/crypto.c13
-rw-r--r--fs/ext4/ext4.h22
-rw-r--r--fs/ext4/extents.c11
-rw-r--r--fs/ext4/fsmap.c9
-rw-r--r--fs/ext4/ialloc.c4
-rw-r--r--fs/ext4/inline.c4
-rw-r--r--fs/ext4/inode.c19
-rw-r--r--fs/ext4/ioctl.c13
-rw-r--r--fs/ext4/namei.c10
-rw-r--r--fs/ext4/super.c54
-rw-r--r--fs/ext4/xattr.c10
-rw-r--r--fs/ext4/xattr.h2
-rw-r--r--fs/f2fs/dir.c6
-rw-r--r--fs/f2fs/f2fs.h11
-rw-r--r--fs/f2fs/file.c14
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/inode.c24
-rw-r--r--fs/f2fs/namei.c4
-rw-r--r--fs/f2fs/recovery.c8
-rw-r--r--fs/f2fs/super.c28
-rw-r--r--fs/f2fs/xattr.c4
-rw-r--r--fs/f2fs/xattr.h2
-rw-r--r--fs/fat/inode.c25
-rw-r--r--fs/fat/misc.c6
-rw-r--r--fs/file.c153
-rw-r--r--fs/file_table.c49
-rw-r--r--fs/freevxfs/vxfs_inode.c6
-rw-r--r--fs/fs-writeback.c41
-rw-r--r--fs/fsopen.c1
-rw-r--r--fs/fuse/control.c2
-rw-r--r--fs/fuse/dir.c10
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c29
-rw-r--r--fs/fuse/readdir.c6
-rw-r--r--fs/fuse/xattr.c2
-rw-r--r--fs/gfs2/bmap.c10
-rw-r--r--fs/gfs2/dir.c10
-rw-r--r--fs/gfs2/glock.c11
-rw-r--r--fs/gfs2/glops.c11
-rw-r--r--fs/gfs2/inode.c7
-rw-r--r--fs/gfs2/quota.c2
-rw-r--r--fs/gfs2/super.c12
-rw-r--r--fs/gfs2/super.h4
-rw-r--r--fs/gfs2/xattr.c4
-rw-r--r--fs/hfs/attr.c2
-rw-r--r--fs/hfs/catalog.c8
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfs/inode.c16
-rw-r--r--fs/hfs/sysdep.c10
-rw-r--r--fs/hfsplus/catalog.c8
-rw-r--r--fs/hfsplus/inode.c22
-rw-r--r--fs/hfsplus/xattr.c2
-rw-r--r--fs/hfsplus/xattr.h2
-rw-r--r--fs/hostfs/hostfs_kern.c12
-rw-r--r--fs/hpfs/dir.c12
-rw-r--r--fs/hpfs/inode.c16
-rw-r--r--fs/hpfs/namei.c22
-rw-r--r--fs/hpfs/super.c10
-rw-r--r--fs/hugetlbfs/inode.c10
-rw-r--r--fs/init.c6
-rw-r--r--fs/inode.c43
-rw-r--r--fs/internal.h22
-rw-r--r--fs/iomap/buffered-io.c10
-rw-r--r--fs/isofs/inode.c4
-rw-r--r--fs/isofs/rock.c18
-rw-r--r--fs/jffs2/dir.c35
-rw-r--r--fs/jffs2/file.c4
-rw-r--r--fs/jffs2/fs.c20
-rw-r--r--fs/jffs2/os-linux.h4
-rw-r--r--fs/jffs2/xattr.c2
-rw-r--r--fs/jffs2/xattr.h2
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/jfs/jfs_imap.c20
-rw-r--r--fs/jfs/jfs_inode.c4
-rw-r--r--fs/jfs/jfs_logmgr.c33
-rw-r--r--fs/jfs/jfs_logmgr.h2
-rw-r--r--fs/jfs/jfs_mount.c3
-rw-r--r--fs/jfs/jfs_xattr.h2
-rw-r--r--fs/jfs/namei.c20
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/kernfs/inode.c8
-rw-r--r--fs/kernfs/kernfs-internal.h2
-rw-r--r--fs/libfs.c41
-rw-r--r--fs/lockd/svc.c7
-rw-r--r--fs/lockd/svclock.c43
-rw-r--r--fs/locks.c12
-rw-r--r--fs/minix/bitmap.c2
-rw-r--r--fs/minix/dir.c6
-rw-r--r--fs/minix/inode.c17
-rw-r--r--fs/minix/itree_common.c2
-rw-r--r--fs/namei.c40
-rw-r--r--fs/namespace.c40
-rw-r--r--fs/nfs/blocklayout/blocklayout.h2
-rw-r--r--fs/nfs/blocklayout/dev.c76
-rw-r--r--fs/nfs/callback.c46
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c17
-rw-r--r--fs/nfs/fscache.h4
-rw-r--r--fs/nfs/inode.c30
-rw-r--r--fs/nfs/nfs.h2
-rw-r--r--fs/nfs/nfs42proc.c3
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/pnfs.c33
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/nfsd/Makefile3
-rw-r--r--fs/nfsd/blocklayout.c3
-rw-r--r--fs/nfsd/blocklayoutxdr.c6
-rw-r--r--fs/nfsd/blocklayoutxdr.h4
-rw-r--r--fs/nfsd/export.c32
-rw-r--r--fs/nfsd/export.h4
-rw-r--r--fs/nfsd/filecache.c27
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c6
-rw-r--r--fs/nfsd/flexfilelayoutxdr.h4
-rw-r--r--fs/nfsd/netlink.c32
-rw-r--r--fs/nfsd/netlink.h22
-rw-r--r--fs/nfsd/nfs3proc.c9
-rw-r--r--fs/nfsd/nfs4callback.c97
-rw-r--r--fs/nfsd/nfs4layouts.c6
-rw-r--r--fs/nfsd/nfs4proc.c40
-rw-r--r--fs/nfsd/nfs4state.c156
-rw-r--r--fs/nfsd/nfs4xdr.c2636
-rw-r--r--fs/nfsd/nfsctl.c205
-rw-r--r--fs/nfsd/nfsd.h17
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfsfh.h3
-rw-r--r--fs/nfsd/nfssvc.c42
-rw-r--r--fs/nfsd/pnfs.h6
-rw-r--r--fs/nfsd/state.h27
-rw-r--r--fs/nfsd/stats.c4
-rw-r--r--fs/nfsd/stats.h18
-rw-r--r--fs/nfsd/trace.h87
-rw-r--r--fs/nfsd/vfs.c75
-rw-r--r--fs/nfsd/vfs.h4
-rw-r--r--fs/nfsd/xdr4.h154
-rw-r--r--fs/nfsd/xdr4cb.h18
-rw-r--r--fs/nilfs2/dir.c6
-rw-r--r--fs/nilfs2/inode.c20
-rw-r--r--fs/notify/dnotify/dnotify.c6
-rw-r--r--fs/notify/fanotify/fanotify_user.c25
-rw-r--r--fs/nsfs.c2
-rw-r--r--fs/ntfs/inode.c25
-rw-r--r--fs/ntfs/mft.c2
-rw-r--r--fs/ntfs3/attrib.c12
-rw-r--r--fs/ntfs3/attrlist.c15
-rw-r--r--fs/ntfs3/bitmap.c4
-rw-r--r--fs/ntfs3/dir.c6
-rw-r--r--fs/ntfs3/file.c10
-rw-r--r--fs/ntfs3/frecord.c19
-rw-r--r--fs/ntfs3/fslog.c6
-rw-r--r--fs/ntfs3/fsntfs.c19
-rw-r--r--fs/ntfs3/index.c3
-rw-r--r--fs/ntfs3/inode.c24
-rw-r--r--fs/ntfs3/namei.c6
-rw-r--r--fs/ntfs3/ntfs.h2
-rw-r--r--fs/ntfs3/ntfs_fs.h6
-rw-r--r--fs/ntfs3/record.c74
-rw-r--r--fs/ntfs3/super.c104
-rw-r--r--fs/ntfs3/xattr.c9
-rw-r--r--fs/ocfs2/acl.c4
-rw-r--r--fs/ocfs2/alloc.c6
-rw-r--r--fs/ocfs2/aops.c6
-rw-r--r--fs/ocfs2/cluster/heartbeat.c81
-rw-r--r--fs/ocfs2/dir.c9
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c4
-rw-r--r--fs/ocfs2/dlmglue.c29
-rw-r--r--fs/ocfs2/file.c30
-rw-r--r--fs/ocfs2/inode.c28
-rw-r--r--fs/ocfs2/move_extents.c4
-rw-r--r--fs/ocfs2/namei.c16
-rw-r--r--fs/ocfs2/refcounttree.c12
-rw-r--r--fs/ocfs2/xattr.c8
-rw-r--r--fs/ocfs2/xattr.h2
-rw-r--r--fs/omfs/inode.c12
-rw-r--r--fs/open.c52
-rw-r--r--fs/openpromfs/inode.c4
-rw-r--r--fs/orangefs/orangefs-kernel.h2
-rw-r--r--fs/orangefs/orangefs-utils.c16
-rw-r--r--fs/orangefs/xattr.c2
-rw-r--r--fs/overlayfs/file.c9
-rw-r--r--fs/overlayfs/inode.c3
-rw-r--r--fs/overlayfs/super.c28
-rw-r--r--fs/overlayfs/util.c4
-rw-r--r--fs/pipe.c66
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/fd.c11
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/nommu.c2
-rw-r--r--fs/proc/proc_sysctl.c2
-rw-r--r--fs/proc/self.c2
-rw-r--r--fs/proc/task_mmu.c4
-rw-r--r--fs/proc/task_nommu.c2
-rw-r--r--fs/proc/thread_self.c2
-rw-r--r--fs/pstore/inode.c5
-rw-r--r--fs/qnx4/inode.c6
-rw-r--r--fs/qnx6/inode.c6
-rw-r--r--fs/ramfs/inode.c7
-rw-r--r--fs/reiserfs/inode.c26
-rw-r--r--fs/reiserfs/journal.c56
-rw-r--r--fs/reiserfs/namei.c8
-rw-r--r--fs/reiserfs/procfs.c2
-rw-r--r--fs/reiserfs/reiserfs.h13
-rw-r--r--fs/reiserfs/stree.c5
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/reiserfs/xattr.c4
-rw-r--r--fs/romfs/super.c5
-rw-r--r--fs/smb/client/cifsfs.h2
-rw-r--r--fs/smb/client/file.c18
-rw-r--r--fs/smb/client/fscache.h6
-rw-r--r--fs/smb/client/inode.c17
-rw-r--r--fs/smb/client/smb2ops.c6
-rw-r--r--fs/smb/client/xattr.c2
-rw-r--r--fs/smb/server/smb2pdu.c8
-rw-r--r--fs/squashfs/inode.c6
-rw-r--r--fs/squashfs/squashfs.h2
-rw-r--r--fs/squashfs/xattr.c2
-rw-r--r--fs/stack.c4
-rw-r--r--fs/stat.c4
-rw-r--r--fs/super.c66
-rw-r--r--fs/sysv/dir.c6
-rw-r--r--fs/sysv/ialloc.c2
-rw-r--r--fs/sysv/inode.c12
-rw-r--r--fs/sysv/itree.c2
-rw-r--r--fs/tracefs/inode.c2
-rw-r--r--fs/ubifs/crypto.c3
-rw-r--r--fs/ubifs/debug.c12
-rw-r--r--fs/ubifs/dir.c23
-rw-r--r--fs/ubifs/file.c16
-rw-r--r--fs/ubifs/journal.c12
-rw-r--r--fs/ubifs/super.c8
-rw-r--r--fs/ubifs/ubifs.h2
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/udf/ialloc.c4
-rw-r--r--fs/udf/inode.c38
-rw-r--r--fs/udf/namei.c16
-rw-r--r--fs/ufs/dir.c6
-rw-r--r--fs/ufs/ialloc.c2
-rw-r--r--fs/ufs/inode.c42
-rw-r--r--fs/vboxsf/utils.c15
-rw-r--r--fs/xattr.c6
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c10
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c6
-rw-r--r--fs/xfs/libxfs/xfs_trans_inode.c2
-rw-r--r--fs/xfs/xfs_bmap_util.c7
-rw-r--r--fs/xfs/xfs_buf.c22
-rw-r--r--fs/xfs/xfs_buf.h3
-rw-r--r--fs/xfs/xfs_inode.c4
-rw-r--r--fs/xfs/xfs_inode_item.c4
-rw-r--r--fs/xfs/xfs_iops.c8
-rw-r--r--fs/xfs/xfs_itable.c12
-rw-r--r--fs/xfs/xfs_rtalloc.c30
-rw-r--r--fs/xfs/xfs_super.c42
-rw-r--r--fs/xfs/xfs_xattr.c2
-rw-r--r--fs/xfs/xfs_xattr.h2
-rw-r--r--fs/zonefs/super.c10
-rw-r--r--include/drm/gpu_scheduler.h3
-rw-r--r--include/kvm/arm_arch_timer.h7
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/closure.h (renamed from drivers/md/bcache/closure.h)46
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/device-mapper.h1
-rw-r--r--include/linux/exportfs.h20
-rw-r--r--include/linux/fdtable.h17
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h12
-rw-r--r--include/linux/fs.h133
-rw-r--r--include/linux/fs_stack.h6
-rw-r--r--include/linux/fscrypt.h82
-rw-r--r--include/linux/fsnotify.h3
-rw-r--r--include/linux/generic-radix-tree.h68
-rw-r--r--include/linux/hugetlb.h41
-rw-r--r--include/linux/ieee80211.h29
-rw-r--r--include/linux/iov_iter.h274
-rw-r--r--include/linux/iversion.h2
-rw-r--r--include/linux/kasan.h6
-rw-r--r--include/linux/llist.h46
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lwq.h124
-rw-r--r--include/linux/mount.h4
-rw-r--r--include/linux/mtd/jedec.h3
-rw-r--r--include/linux/mtd/onfi.h1
-rw-r--r--include/linux/mtd/rawnand.h2
-rw-r--r--include/linux/namei.h26
-rw-r--r--include/linux/nfs4.h262
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pipe_fs_i.h22
-rw-r--r--include/linux/pktcdvd.h4
-rw-r--r--include/linux/pseudo_fs.h2
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/string_helpers.h4
-rw-r--r--include/linux/sunrpc/svc.h45
-rw-r--r--include/linux/sunrpc/svc_xprt.h2
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/swap.h1
-rw-r--r--include/linux/uio.h34
-rw-r--r--include/linux/virtio_net.h19
-rw-r--r--include/linux/watch_queue.h2
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/net/netns/xfrm.h1
-rw-r--r--include/net/sock.h10
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/scsi/scsi_device.h20
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/trace/events/btrfs.h83
-rw-r--r--include/trace/events/neigh.h4
-rw-r--r--include/trace/events/rpcrdma.h10
-rw-r--r--include/trace/events/sunrpc.h1
-rw-r--r--include/uapi/drm/nouveau_drm.h4
-rw-r--r--include/uapi/linux/btrfs.h3
-rw-r--r--include/uapi/linux/btrfs_tree.h60
-rw-r--r--include/uapi/linux/fscrypt.h3
-rw-r--r--include/uapi/linux/gtp.h2
-rw-r--r--include/uapi/linux/nfsd_netlink.h39
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/uvesafb.h2
-rw-r--r--init/do_mounts.c2
-rw-r--r--init/init_task.c1
-rw-r--r--io_uring/fdinfo.c18
-rw-r--r--io_uring/io_uring.c6
-rw-r--r--io_uring/openclose.c9
-rw-r--r--io_uring/rw.c11
-rw-r--r--ipc/mqueue.c19
-rw-r--r--kernel/acct.c4
-rw-r--r--kernel/auditsc.c8
-rw-r--r--kernel/bpf/inode.c5
-rw-r--r--kernel/bpf/task_iter.c4
-rw-r--r--kernel/dma/swiotlb.c5
-rw-r--r--kernel/events/core.c40
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/kcmp.c4
-rw-r--r--kernel/locking/mutex.c3
-rw-r--r--kernel/power/hibernate.c14
-rw-r--r--kernel/power/power.h2
-rw-r--r--kernel/power/swap.c37
-rw-r--r--kernel/sched/fair.c3
-rw-r--r--kernel/stacktrace.c2
-rw-r--r--kernel/trace/fprobe.c6
-rw-r--r--kernel/trace/trace_kprobe.c83
-rw-r--r--kernel/trace/trace_output.c2
-rw-r--r--kernel/trace/trace_probe.h1
-rw-r--r--lib/Kconfig8
-rw-r--r--lib/Kconfig.debug9
-rw-r--r--lib/Makefile4
-rw-r--r--lib/closure.c (renamed from drivers/md/bcache/closure.c)36
-rw-r--r--lib/errname.c1
-rw-r--r--lib/generic-radix-tree.c76
-rw-r--r--lib/iov_iter.c437
-rw-r--r--lib/llist.c28
-rw-r--r--lib/lwq.c158
-rw-r--r--lib/maple_tree.c2
-rw-r--r--lib/string_helpers.c10
-rw-r--r--lib/test_maple_tree.c35
-rw-r--r--mm/damon/sysfs.c7
-rw-r--r--mm/hugetlb.c82
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory.c13
-rw-r--r--mm/mempolicy.c4
-rw-r--r--mm/migrate.c14
-rw-r--r--mm/mmap.c46
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/readahead.c3
-rw-r--r--mm/shmem.c22
-rw-r--r--mm/slab_common.c7
-rw-r--r--mm/swapfile.c23
-rw-r--r--mm/zswap.c4
-rw-r--r--net/bluetooth/hci_conn.c9
-rw-r--r--net/bluetooth/hci_event.c48
-rw-r--r--net/bluetooth/hci_sock.c3
-rw-r--r--net/bluetooth/hci_sync.c26
-rw-r--r--net/core/datagram.c75
-rw-r--r--net/core/dev.c65
-rw-r--r--net/core/dev.h3
-rw-r--r--net/core/neighbour.c67
-rw-r--r--net/core/pktgen.c14
-rw-r--r--net/core/rtnetlink.c4
-rw-r--r--net/core/skbuff.c40
-rw-r--r--net/core/stream.c12
-rw-r--r--net/ethtool/bitset.c32
-rw-r--r--net/handshake/netlink.c30
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/esp4.c6
-rw-r--r--net/ipv4/fib_semantics.c14
-rw-r--r--net/ipv4/inet_connection_sock.c1
-rw-r--r--net/ipv4/inet_hashtables.c24
-rw-r--r--net/ipv4/tcp.c24
-rw-r--r--net/ipv4/tcp_bpf.c12
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv4/tcp_output.c25
-rw-r--r--net/ipv4/tcp_recovery.c2
-rw-r--r--net/ipv6/esp6.c6
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/mac80211/key.c3
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mptcp/protocol.c43
-rw-r--r--net/netfilter/nf_flow_table_core.c14
-rw-r--r--net/netfilter/nf_tables_api.c70
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nft_inner.c1
-rw-r--r--net/netfilter/nft_payload.c2
-rw-r--r--net/netfilter/nft_set_pipapo.h2
-rw-r--r--net/netfilter/nft_set_rbtree.c2
-rw-r--r--net/nfc/nci/spi.c2
-rw-r--r--net/rfkill/core.c5
-rw-r--r--net/rfkill/rfkill-gpio.c4
-rw-r--r--net/sched/act_ct.c9
-rw-r--r--net/sched/sch_hfsc.c18
-rw-r--r--net/smc/af_smc.c5
-rw-r--r--net/smc/smc_ib.c7
-rw-r--r--net/smc/smc_ib.h2
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/backchannel_rqst.c13
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/sunrpc/svc.c155
-rw-r--r--net/sunrpc/svc_xprt.c236
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c3
-rw-r--r--net/tls/tls_main.c10
-rw-r--r--net/tls/tls_sw.c19
-rw-r--r--net/vmw_vsock/virtio_transport.c18
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/xfrm/xfrm_interface_core.c22
-rw-r--r--net/xfrm/xfrm_policy.c27
-rw-r--r--rust/Makefile16
-rw-r--r--rust/kernel/error.rs4
-rw-r--r--scripts/const_structs.checkpatch1
-rw-r--r--security/apparmor/apparmorfs.c7
-rw-r--r--security/apparmor/policy_unpack.c4
-rw-r--r--security/inode.c2
-rw-r--r--security/selinux/selinuxfs.c2
-rw-r--r--sound/core/pcm_native.c4
-rw-r--r--sound/pci/hda/patch_realtek.c27
-rw-r--r--sound/soc/codecs/cs35l56.c8
-rw-r--r--sound/soc/codecs/cs42l42-sdw.c1
-rw-r--r--sound/soc/codecs/cs42l43-jack.c2
-rw-r--r--sound/soc/codecs/da7219-aad.c11
-rw-r--r--sound/soc/codecs/lpass-wsa-macro.c4
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/tas2780.c2
-rw-r--r--sound/soc/codecs/wcd938x-sdw.c27
-rw-r--r--sound/soc/codecs/wcd938x.c76
-rw-r--r--sound/soc/dwc/dwc-i2s.c2
-rw-r--r--sound/soc/pxa/pxa-ssp.c2
-rw-r--r--sound/soc/soc-component.c1
-rw-r--r--sound/soc/soc-dapm.c12
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_32.h2
-rw-r--r--tools/build/feature/test-llvm.cpp14
-rw-r--r--tools/include/linux/rwsem.h40
-rw-r--r--tools/include/nolibc/arch-i386.h4
-rw-r--r--tools/include/nolibc/crt.h1
-rw-r--r--tools/net/ynl/Makefile.deps1
-rw-r--r--tools/net/ynl/generated/Makefile2
-rw-r--r--tools/net/ynl/generated/devlink-user.c54
-rw-r--r--tools/net/ynl/generated/nfsd-user.c95
-rw-r--r--tools/net/ynl/generated/nfsd-user.h33
-rw-r--r--tools/objtool/noreturns.h2
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v0.c12
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v2.c12
-rw-r--r--tools/perf/util/dlfilter.c32
-rw-r--r--tools/perf/util/pmu.c8
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc13
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h2
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h23
-rw-r--r--tools/testing/selftests/kvm/lib/guest_sprintf.c7
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/apic.c2
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c9
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c2
-rwxr-xr-xtools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh1
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c110
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c4
-rw-r--r--tools/testing/selftests/mm/mremap_dontunmap.c1
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh7
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh21
-rwxr-xr-xtools/testing/selftests/net/netns-name.sh87
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh21
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py48
-rwxr-xr-xtools/testing/selftests/netfilter/nft_audit.sh52
-rw-r--r--tools/testing/selftests/user_events/abi_test.c16
-rw-r--r--tools/virtio/linux/dma-mapping.h12
1178 files changed, 111716 insertions(+), 11715 deletions(-)
diff --git a/.mailmap b/.mailmap
index c80903efec75..2643b7203a74 100644
--- a/.mailmap
+++ b/.mailmap
@@ -87,6 +87,7 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
+Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
Ben Gardner <bgardner@wabtec.com>
@@ -450,9 +451,10 @@ Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
-Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
-Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de> <ore@pengutronix.de>
Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
+Ondřej Jirman <megi@xff.cz> <megous@megous.com>
Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
index 0a41afe0ab4c..9051695d2211 100644
--- a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
+++ b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
@@ -1,7 +1,7 @@
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns the root entry hash for the static
region if one is programmed, else it returns the
string: "hash not programmed". This file is only
@@ -11,7 +11,7 @@ Description: Read only. Returns the root entry hash for the static
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns the root entry hash for the partial
reconfiguration region if one is programmed, else it
returns the string: "hash not programmed". This file
@@ -21,7 +21,7 @@ Description: Read only. Returns the root entry hash for the partial
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_root_entry_hash
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns the root entry hash for the BMC image
if one is programmed, else it returns the string:
"hash not programmed". This file is only visible if the
@@ -31,7 +31,7 @@ Description: Read only. Returns the root entry hash for the BMC image
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/sr_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns a list of indices for canceled code
signing keys for the static region. The standard bitmap
list format is used (e.g. "1,2-6,9").
@@ -39,7 +39,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/pr_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns a list of indices for canceled code
signing keys for the partial reconfiguration region. The
standard bitmap list format is used (e.g. "1,2-6,9").
@@ -47,7 +47,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/bmc_canceled_csks
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns a list of indices for canceled code
signing keys for the BMC. The standard bitmap list format
is used (e.g. "1,2-6,9").
@@ -55,7 +55,7 @@ Description: Read only. Returns a list of indices for canceled code
What: /sys/bus/platform/drivers/intel-m10bmc-sec-update/.../security/flash_count
Date: Sep 2022
KernelVersion: 5.20
-Contact: Russ Weight <russell.h.weight@intel.com>
+Contact: Peter Colberg <peter.colberg@intel.com>
Description: Read only. Returns number of times the secure update
staging area has been flashed.
Format: "%u".
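
As a usage note for the *_canceled_csks attributes above: the "standard
bitmap list format" is a plain comma-separated sequence of indices and
index ranges. A minimal parser sketch in C; the helper name is
hypothetical and this is not kernel code, only an illustration of the
format::

	#include <stdlib.h>

	/* Sketch: walk a bitmap list string such as "1,2-6,9", invoking
	 * cb() once per listed index. Hypothetical helper. */
	static void for_each_listed_index(const char *s, void (*cb)(unsigned long))
	{
		while (*s) {
			char *end;
			unsigned long lo = strtoul(s, &end, 10);
			unsigned long hi = lo;

			if (*end == '-') /* a range such as "2-6" */
				hi = strtoul(end + 1, &end, 10);
			for (unsigned long i = lo; i <= hi; i++)
				cb(i);
			if (*end != ',')
				break;
			s = end + 1;
		}
	}

For "1,2-6,9" this yields the indices 1, 2, 3, 4, 5, 6 and 9.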
diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
index 2594fa192f93..2a04906531fb 100644
--- a/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
+++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74115.yaml
@@ -32,7 +32,8 @@ properties:
spi-cpol: true
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
interrupts:
minItems: 1
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
index 4e508bfcc9d8..5121685337b5 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
@@ -78,7 +78,8 @@ properties:
- const: -1000
- const: 22000
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
adi,dc-dc-ilim-microamp:
enum: [150000, 200000, 250000, 300000, 350000, 400000]
diff --git a/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml b/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml
index b9b5beac33b2..5b6cde86b5a5 100644
--- a/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml
+++ b/Documentation/devicetree/bindings/iio/health/ti,afe4403.yaml
@@ -23,7 +23,8 @@ properties:
maxItems: 1
description: Connected to ADC_RDY pin.
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
required:
- compatible
diff --git a/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml b/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
index 2958c4ca75b4..167d10bd60af 100644
--- a/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
+++ b/Documentation/devicetree/bindings/iio/health/ti,afe4404.yaml
@@ -23,7 +23,8 @@ properties:
maxItems: 1
description: Connected to ADC_RDY pin.
- reset-gpios: true
+ reset-gpios:
+ maxItems: 1
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/memory-controllers/xlnx,versal-ddrmc-edac.yaml b/Documentation/devicetree/bindings/memory-controllers/xlnx,versal-ddrmc-edac.yaml
new file mode 100644
index 000000000000..12f8e9f350bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/xlnx,versal-ddrmc-edac.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/memory-controllers/xlnx,versal-ddrmc-edac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Versal DDRMC (Integrated DDR Memory Controller)
+
+maintainers:
+ - Shubhrajyoti Datta <shubhrajyoti.datta@amd.com>
+ - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+
+description:
+ The integrated DDR Memory Controllers (DDRMCs) support both DDR4 and LPDDR4/
+ 4X memory interfaces. The Versal DDR memory controller has optional ECC
+ support, which corrects single-bit ECC errors and detects double-bit ECC errors.
+
+properties:
+ compatible:
+ const: xlnx,versal-ddrmc
+
+ reg:
+ items:
+ - description: DDR Memory Controller registers
+ - description: NOC registers corresponding to DDR Memory Controller
+
+ reg-names:
+ items:
+ - const: base
+ - const: noc
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ memory-controller@f6150000 {
+ compatible = "xlnx,versal-ddrmc";
+ reg = <0x0 0xf6150000 0x0 0x2000>, <0x0 0xf6070000 0x0 0x20000>;
+ reg-names = "base", "noc";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
index 80141eb7fc6b..10f34aa8ba8a 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
@@ -69,7 +69,7 @@ properties:
maxItems: 4
clocks:
- minItems: 3
+ minItems: 2
items:
- description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock
- description: SDC MMC clock, MCLK
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 5073007267ad..634cec5d57ea 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -70,7 +70,7 @@ examples:
phy@84000 {
compatible = "qcom,ipq6018-qmp-pcie-phy";
- reg = <0x0 0x00084000 0x0 0x1000>;
+ reg = <0x00084000 0x1000>;
clocks = <&gcc GCC_PCIE0_AUX_CLK>,
<&gcc GCC_PCIE0_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
index 7a6de938b11d..4118aa54bbd5 100644
--- a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
@@ -82,7 +82,7 @@ properties:
description:
Current at which the headset micbias sense clamp will engage, 0 to
disable.
- enum: [ 0, 14, 23, 41, 50, 60, 68, 86, 95 ]
+ enum: [ 0, 14, 24, 43, 52, 61, 71, 90, 99 ]
default: 0
cirrus,bias-ramp-ms:
diff --git a/Documentation/filesystems/files.rst b/Documentation/filesystems/files.rst
index bcf84459917f..9e38e4c221ca 100644
--- a/Documentation/filesystems/files.rst
+++ b/Documentation/filesystems/files.rst
@@ -62,7 +62,7 @@ the fdtable structure -
be held.
4. To look up the file structure given an fd, a reader
- must use either lookup_fd_rcu() or files_lookup_fd_rcu() APIs. These
+ must use either lookup_fdget_rcu() or files_lookup_fdget_rcu() APIs. These
take care of barrier requirements due to lock-free lookup.
An example::
@@ -70,43 +70,22 @@ the fdtable structure -
struct file *file;
rcu_read_lock();
- file = lookup_fd_rcu(fd);
- if (file) {
- ...
- }
- ....
+ file = lookup_fdget_rcu(fd);
rcu_read_unlock();
-
-5. Handling of the file structures is special. Since the look-up
- of the fd (fget()/fget_light()) are lock-free, it is possible
- that look-up may race with the last put() operation on the
- file structure. This is avoided using atomic_long_inc_not_zero()
- on ->f_count::
-
- rcu_read_lock();
- file = files_lookup_fd_rcu(files, fd);
if (file) {
- if (atomic_long_inc_not_zero(&file->f_count))
- *fput_needed = 1;
- else
- /* Didn't get the reference, someone's freed */
- file = NULL;
+ ...
+ fput(file);
}
- rcu_read_unlock();
....
- return file;
-
- atomic_long_inc_not_zero() detects if refcounts is already zero or
- goes to zero during increment. If it does, we fail
- fget()/fget_light().
-6. Since both fdtable and file structures can be looked up
+5. Since both fdtable and file structures can be looked up
lock-free, they must be installed using rcu_assign_pointer()
API. If they are looked up lock-free, rcu_dereference()
must be used. However it is advisable to use files_fdtable()
- and lookup_fd_rcu()/files_lookup_fd_rcu() which take care of these issues.
+ and lookup_fdget_rcu()/files_lookup_fdget_rcu() which take care of these
+ issues.
-7. While updating, the fdtable pointer must be looked up while
+6. While updating, the fdtable pointer must be looked up while
holding files->file_lock. If ->file_lock is dropped, then
another thread may expand the files, thereby creating a new
fdtable and making the earlier fdtable pointer stale.
@@ -126,3 +105,19 @@ the fdtable structure -
Since locate_fd() can drop ->file_lock (and reacquire ->file_lock),
the fdtable pointer (fdt) must be loaded after locate_fd().
+On newer kernels, rcu-based file lookup has been switched to rely on
+SLAB_TYPESAFE_BY_RCU instead of call_rcu(). It isn't sufficient anymore
+to just acquire a reference to the file in question under rcu using
+atomic_long_inc_not_zero() since the file might have already been
+recycled and someone else might have bumped the reference. In other
+words, callers might see reference count bumps from newer users. For
+this reason it is necessary to verify that the pointer is the same
+before and after the reference count increment. This pattern can be seen
+in get_file_rcu() and __files_get_rcu().
+
+In addition, it isn't possible to access or check fields in struct file
+without first acquiring a reference on it under rcu lookup. Not doing
+that was always very dodgy and it was only usable for non-pointer data
+in struct file. With SLAB_TYPESAFE_BY_RCU it is necessary that callers
+either first acquire a reference or they must hold the file_lock of the
+fdtable.
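
A minimal sketch of this verify-after-increment pattern, using a
hypothetical helper name (the in-tree versions are get_file_rcu() and
__files_get_rcu()); the caller is expected to hold rcu_read_lock()::

	static struct file *sketch_get_file_rcu(struct file __rcu **f)
	{
		for (;;) {
			struct file *file = rcu_dereference_raw(*f);

			if (!file)
				return NULL;

			/* The count may already be zero if a free is in flight. */
			if (!atomic_long_inc_not_zero(&file->f_count))
				continue;

			/* Pointer unchanged: the reference we took is ours. */
			if (file == rcu_dereference_raw(*f))
				return file;

			/* Slot was recycled; drop the stray reference and retry. */
			fput(file);
		}
	}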
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index a624e92f2687..1b84f818e574 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -261,9 +261,9 @@ DIRECT_KEY policies
The Adiantum encryption mode (see `Encryption modes and usage`_) is
suitable for both contents and filenames encryption, and it accepts
-long IVs --- long enough to hold both an 8-byte logical block number
-and a 16-byte per-file nonce. Also, the overhead of each Adiantum key
-is greater than that of an AES-256-XTS key.
+long IVs --- long enough to hold both an 8-byte data unit index and a
+16-byte per-file nonce. Also, the overhead of each Adiantum key is
+greater than that of an AES-256-XTS key.
Therefore, to improve performance and save memory, for Adiantum a
"direct key" configuration is supported. When the user has enabled
@@ -300,8 +300,8 @@ IV_INO_LBLK_32 policies
IV_INO_LBLK_32 policies work like IV_INO_LBLK_64, except that for
IV_INO_LBLK_32, the inode number is hashed with SipHash-2-4 (where the
-SipHash key is derived from the master key) and added to the file
-logical block number mod 2^32 to produce a 32-bit IV.
+SipHash key is derived from the master key) and added to the file data
+unit index mod 2^32 to produce a 32-bit IV.
This format is optimized for use with inline encryption hardware
compliant with the eMMC v5.2 standard, which supports only 32 IV bits
@@ -451,31 +451,62 @@ acceleration is recommended:
Contents encryption
-------------------
-For file contents, each filesystem block is encrypted independently.
-Starting from Linux kernel 5.5, encryption of filesystems with block
-size less than system's page size is supported.
-
-Each block's IV is set to the logical block number within the file as
-a little endian number, except that:
-
-- With CBC mode encryption, ESSIV is also used. Specifically, each IV
- is encrypted with AES-256 where the AES-256 key is the SHA-256 hash
- of the file's data encryption key.
-
-- With `DIRECT_KEY policies`_, the file's nonce is appended to the IV.
- Currently this is only allowed with the Adiantum encryption mode.
-
-- With `IV_INO_LBLK_64 policies`_, the logical block number is limited
- to 32 bits and is placed in bits 0-31 of the IV. The inode number
- (which is also limited to 32 bits) is placed in bits 32-63.
-
-- With `IV_INO_LBLK_32 policies`_, the logical block number is limited
- to 32 bits and is placed in bits 0-31 of the IV. The inode number
- is then hashed and added mod 2^32.
-
-Note that because file logical block numbers are included in the IVs,
-filesystems must enforce that blocks are never shifted around within
-encrypted files, e.g. via "collapse range" or "insert range".
+For contents encryption, each file's contents is divided into "data
+units". Each data unit is encrypted independently. The IV for each
+data unit incorporates the zero-based index of the data unit within
+the file. This ensures that each data unit within a file is encrypted
+differently, which is essential to prevent leaking information.
+
+Note: because the encryption depends on the offset into the file,
+operations like "collapse range" and "insert range" that rearrange the
+extent mapping of files are not supported on encrypted files.
+
+There are two cases for the sizes of the data units:
+
+* Fixed-size data units. This is how all filesystems other than UBIFS
+ work. A file's data units are all the same size; the last data unit
+ is zero-padded if needed. By default, the data unit size is equal
+ to the filesystem block size. On some filesystems, users can select
+ a sub-block data unit size via the ``log2_data_unit_size`` field of
+ the encryption policy; see `FS_IOC_SET_ENCRYPTION_POLICY`_.
+
+* Variable-size data units. This is what UBIFS does. Each "UBIFS
+ data node" is treated as a crypto data unit. Each contains variable
+ length, possibly compressed data, zero-padded to the next 16-byte
+ boundary. Users cannot select a sub-block data unit size on UBIFS.
+
+In the case of compression + encryption, the compressed data is
+encrypted. UBIFS compression works as described above. f2fs
+compression works a bit differently; it compresses a number of
+filesystem blocks into a smaller number of filesystem blocks.
+Therefore an f2fs-compressed file still uses fixed-size data units, and
+it is encrypted in a similar way to a file containing holes.
+
+As mentioned in `Key hierarchy`_, the default encryption setting uses
+per-file keys. In this case, the IV for each data unit is simply the
+index of the data unit in the file. However, users can select an
+encryption setting that does not use per-file keys. For these, some
+kind of file identifier is incorporated into the IVs as follows:
+
+- With `DIRECT_KEY policies`_, the data unit index is placed in bits
+ 0-63 of the IV, and the file's nonce is placed in bits 64-191.
+
+- With `IV_INO_LBLK_64 policies`_, the data unit index is placed in
+ bits 0-31 of the IV, and the file's inode number is placed in bits
+ 32-63. This setting is only allowed when data unit indices and
+ inode numbers fit in 32 bits.
+
+- With `IV_INO_LBLK_32 policies`_, the file's inode number is hashed
+ and added to the data unit index. The resulting value is truncated
+ to 32 bits and placed in bits 0-31 of the IV. This setting is only
+ allowed when data unit indices and inode numbers fit in 32 bits.
+
+The byte order of the IV is always little endian.
+
+If the user selects FSCRYPT_MODE_AES_128_CBC for the contents mode, an
+ESSIV layer is automatically included. In this case, before the IV is
+passed to AES-128-CBC, it is encrypted with AES-256 where the AES-256
+key is the SHA-256 hash of the file's contents encryption key.
Filenames encryption
--------------------
@@ -544,7 +575,8 @@ follows::
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
@@ -586,6 +618,29 @@ This structure must be initialized as follows:
The DIRECT_KEY, IV_INO_LBLK_64, and IV_INO_LBLK_32 flags are
mutually exclusive.
+- ``log2_data_unit_size`` is the log2 of the data unit size in bytes,
+ or 0 to select the default data unit size. The data unit size is
+ the granularity of file contents encryption. For example, setting
+ ``log2_data_unit_size`` to 12 causes file contents to be passed to the
+ underlying encryption algorithm (such as AES-256-XTS) in 4096-byte
+ data units, each with its own IV.
+
+ Not all filesystems support setting ``log2_data_unit_size``. ext4
+ and f2fs support it since Linux v6.7. On filesystems that support
+ it, the supported nonzero values are 9 through the log2 of the
+ filesystem block size, inclusively. The default value of 0 selects
+ the filesystem block size.
+
+ The main use case for ``log2_data_unit_size`` is for selecting a
+ data unit size smaller than the filesystem block size for
+ compatibility with inline encryption hardware that only supports
+ smaller data unit sizes. ``/sys/block/$disk/queue/crypto/`` may be
+ useful for checking which data unit sizes are supported by a
+ particular system's inline encryption hardware.
+
+ Leave this field zeroed unless you are certain you need it. Using
+ an unnecessarily small data unit size reduces performance.
+
- For v2 encryption policies, ``__reserved`` must be zeroed.
- For v1 encryption policies, ``master_key_descriptor`` specifies how
@@ -1079,8 +1134,8 @@ The caller must zero all input fields, then fill in ``key_spec``:
On success, 0 is returned and the kernel fills in the output fields:
- ``status`` indicates whether the key is absent, present, or
- incompletely removed. Incompletely removed means that the master
- secret has been removed, but some files are still in use; i.e.,
+ incompletely removed. Incompletely removed means that removal has
+ been initiated, but some files are still in use; i.e.,
`FS_IOC_REMOVE_ENCRYPTION_KEY`_ returned 0 but set the informational
status flag FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY.
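
To make the data unit indexing described above concrete, here is a small
userspace sketch (a hypothetical helper, not fscrypt's actual code) that
derives the data unit index for a byte offset and stores it as the
little-endian low 8 bytes of a 16-byte IV, as used with per-file keys::

	#include <stdint.h>
	#include <string.h>

	static void sketch_fill_iv(uint8_t iv[16], uint64_t pos, int log2_dusize)
	{
		/* zero-based index of the data unit containing byte 'pos' */
		uint64_t index = pos >> log2_dusize;
		int i;

		memset(iv, 0, 16);
		for (i = 0; i < 8; i++)		/* little-endian byte order */
			iv[i] = (uint8_t)(index >> (8 * i));
	}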
diff --git a/Documentation/filesystems/nfs/exporting.rst b/Documentation/filesystems/nfs/exporting.rst
index 4b30daee399a..198d805d611c 100644
--- a/Documentation/filesystems/nfs/exporting.rst
+++ b/Documentation/filesystems/nfs/exporting.rst
@@ -241,3 +241,10 @@ following flags are defined:
all of an inode's dirty data on last close. Exports that behave this
way should set EXPORT_OP_FLUSH_ON_CLOSE so that NFSD knows to skip
waiting for writeback when closing such files.
+
+ EXPORT_OP_ASYNC_LOCK - Indicates that the filesystem can handle async lock
+ requests from lockd. Only set EXPORT_OP_ASYNC_LOCK if the filesystem has
+ its own ->lock() functionality, as the core posix_lock_file() implementation
+ has no async lock request handling yet. For more information about how to
+ indicate an async lock request from a ->lock() file_operations struct, see
+ fs/locks.c and the comment for the function vfs_lock_file().
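
For illustration only (not taken from an in-tree filesystem), a filesystem
that provides its own ->lock() could advertise the capability like this::

	static const struct export_operations sketch_export_ops = {
		/* valid only together with a filesystem ->lock() method */
		.flags = EXPORT_OP_ASYNC_LOCK,
	};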
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index 4d05b9862451..d69f59700a23 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -1045,3 +1045,10 @@ filesystem type is now moved to a later point when the devices are closed:
As this is a VFS level change it has no practical consequences for filesystems
other than that all of them must use one of the provided kill_litter_super(),
kill_anon_super(), or kill_block_super() helpers.
+
+---
+
+**mandatory**
+
+Lock ordering has been changed so that s_umount ranks above open_mutex again.
+All places where s_umount was taken under open_mutex have been fixed up.
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index d1ebcd927149..065661acb878 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -323,7 +323,7 @@ operations:
- dev-name
- sb-index
reply: &sb-get-reply
- value: 11
+ value: 13
attributes: *sb-id-attrs
dump:
request:
@@ -350,7 +350,7 @@ operations:
- sb-index
- sb-pool-index
reply: &sb-pool-get-reply
- value: 15
+ value: 17
attributes: *sb-pool-id-attrs
dump:
request:
@@ -378,7 +378,7 @@ operations:
- sb-index
- sb-pool-index
reply: &sb-port-pool-get-reply
- value: 19
+ value: 21
attributes: *sb-port-pool-id-attrs
dump:
request:
@@ -407,7 +407,7 @@ operations:
- sb-pool-type
- sb-tc-index
reply: &sb-tc-pool-bind-get-reply
- value: 23
+ value: 25
attributes: *sb-tc-pool-bind-id-attrs
dump:
request:
@@ -538,7 +538,7 @@ operations:
- dev-name
- trap-name
reply: &trap-get-reply
- value: 61
+ value: 63
attributes: *trap-id-attrs
dump:
request:
@@ -564,7 +564,7 @@ operations:
- dev-name
- trap-group-name
reply: &trap-group-get-reply
- value: 65
+ value: 67
attributes: *trap-group-id-attrs
dump:
request:
@@ -590,7 +590,7 @@ operations:
- dev-name
- trap-policer-id
reply: &trap-policer-get-reply
- value: 69
+ value: 71
attributes: *trap-policer-id-attrs
dump:
request:
@@ -617,7 +617,7 @@ operations:
- port-index
- rate-node-name
reply: &rate-get-reply
- value: 74
+ value: 76
attributes: *rate-id-attrs
dump:
request:
@@ -643,7 +643,7 @@ operations:
- dev-name
- linecard-index
reply: &linecard-get-reply
- value: 78
+ value: 80
attributes: *linecard-id-attrs
dump:
request:
diff --git a/Documentation/netlink/specs/nfsd.yaml b/Documentation/netlink/specs/nfsd.yaml
new file mode 100644
index 000000000000..05acc73e2e33
--- /dev/null
+++ b/Documentation/netlink/specs/nfsd.yaml
@@ -0,0 +1,89 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+name: nfsd
+protocol: genetlink
+uapi-header: linux/nfsd_netlink.h
+
+doc: NFSD configuration over generic netlink.
+
+attribute-sets:
+ -
+ name: rpc-status
+ attributes:
+ -
+ name: xid
+ type: u32
+ byte-order: big-endian
+ -
+ name: flags
+ type: u32
+ -
+ name: prog
+ type: u32
+ -
+ name: version
+ type: u8
+ -
+ name: proc
+ type: u32
+ -
+ name: service_time
+ type: s64
+ -
+ name: pad
+ type: pad
+ -
+ name: saddr4
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: daddr4
+ type: u32
+ byte-order: big-endian
+ display-hint: ipv4
+ -
+ name: saddr6
+ type: binary
+ display-hint: ipv6
+ -
+ name: daddr6
+ type: binary
+ display-hint: ipv6
+ -
+ name: sport
+ type: u16
+ byte-order: big-endian
+ -
+ name: dport
+ type: u16
+ byte-order: big-endian
+ -
+ name: compound-ops
+ type: u32
+ multi-attr: true
+
+operations:
+ list:
+ -
+ name: rpc-status-get
+ doc: dump pending nfsd rpc
+ attribute-set: rpc-status
+ dump:
+ pre: nfsd-nl-rpc-status-get-start
+ post: nfsd-nl-rpc-status-get-done
+ reply:
+ attributes:
+ - xid
+ - flags
+ - prog
+ - version
+ - proc
+ - service_time
+ - saddr4
+ - daddr4
+ - saddr6
+ - daddr6
+ - sport
+ - dport
+ - compound-ops
diff --git a/Documentation/networking/representors.rst b/Documentation/networking/representors.rst
index ee1f5cd54496..decb39c19b9e 100644
--- a/Documentation/networking/representors.rst
+++ b/Documentation/networking/representors.rst
@@ -162,9 +162,11 @@ How are representors identified?
The representor netdevice should *not* directly refer to a PCIe device (e.g.
through ``net_dev->dev.parent`` / ``SET_NETDEV_DEV()``), either of the
representee or of the switchdev function.
-Instead, it should implement the ``ndo_get_devlink_port()`` netdevice op, which
-the kernel uses to provide the ``phys_switch_id`` and ``phys_port_name`` sysfs
-nodes. (Some legacy drivers implement ``ndo_get_port_parent_id()`` and
+Instead, the driver should use the ``SET_NETDEV_DEVLINK_PORT`` macro to
+assign a devlink port instance to the netdevice before registering the
+netdevice; the kernel uses the devlink port to provide the ``phys_switch_id``
+and ``phys_port_name`` sysfs nodes.
+(Some legacy drivers implement ``ndo_get_port_parent_id()`` and
``ndo_get_phys_port_name()`` directly, but this is deprecated.) See
:ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>` for the
details of this API.
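
As a sketch of the registration ordering described above (function and
variable names are illustrative, not from a specific driver)::

	static int sketch_register_repr(struct net_device *netdev,
					struct devlink_port *dl_port)
	{
		/* attach the devlink port before the netdev becomes visible */
		SET_NETDEV_DEVLINK_PORT(netdev, dl_port);
		return register_netdev(netdev);
	}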
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index 49029ee82e55..081397827a7e 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -29,7 +29,7 @@ target with the same invocation used for compilation, e.g.::
To read the docs locally in your web browser, run e.g.::
- xdg-open rust/doc/kernel/index.html
+ xdg-open Documentation/output/rust/rustdoc/kernel/index.html
To learn about how to write the documentation, please see coding-guidelines.rst.
diff --git a/Documentation/trace/fprobe.rst b/Documentation/trace/fprobe.rst
index 7a895514b537..196f52386aaa 100644
--- a/Documentation/trace/fprobe.rst
+++ b/Documentation/trace/fprobe.rst
@@ -91,9 +91,9 @@ The prototype of the entry/exit callback function are as follows:
.. code-block:: c
- int entry_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ int entry_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
- void exit_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ void exit_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
Note that the @entry_ip is saved at function entry and passed to exit handler.
If the entry callback function returns !0, the corresponding exit callback will be cancelled.
@@ -108,6 +108,10 @@ If the entry callback function returns !0, the corresponding exit callback will
Note that this may not be the actual entry address of the function but
the address where the ftrace is instrumented.
+@ret_ip
+ This is the return address that the traced function will return to,
+ somewhere in the caller. This can be used at both entry and exit.
+
@regs
This is the `pt_regs` data structure at the entry and exit. Note that
the instruction pointer of @regs may be different from the @entry_ip
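
Putting the two prototypes above together, a minimal hypothetical probe
that logs both addresses could look as follows; only the callback
signatures are taken from the text above:

.. code-block:: c

	#include <linux/fprobe.h>

	static int my_entry(struct fprobe *fp, unsigned long entry_ip,
			    unsigned long ret_ip, struct pt_regs *regs,
			    void *entry_data)
	{
		pr_info("hit %pS, returns to %pS\n",
			(void *)entry_ip, (void *)ret_ip);
		return 0;	/* nonzero cancels the exit callback */
	}

	static void my_exit(struct fprobe *fp, unsigned long entry_ip,
			    unsigned long ret_ip, struct pt_regs *regs,
			    void *entry_data)
	{
		pr_info("left %pS via %pS\n",
			(void *)entry_ip, (void *)ret_ip);
	}

	static struct fprobe my_fprobe = {
		.entry_handler = my_entry,
		.exit_handler = my_exit,
	};

	/* registered with: register_fprobe(&my_fprobe, "vfs_read", NULL); */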
diff --git a/MAINTAINERS b/MAINTAINERS
index 7a7bd8bd80e9..55d9d860f1a7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -378,8 +378,9 @@ F: drivers/acpi/viot.c
F: include/linux/acpi_viot.h
ACPI WMI DRIVER
+M: Armin Wolf <W_Armin@gmx.de>
L: platform-driver-x86@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/driver-api/wmi.rst
F: Documentation/wmi/
F: drivers/platform/x86/wmi.c
@@ -3481,6 +3482,14 @@ W: http://bcache.evilpiepirate.org
C: irc://irc.oftc.net/bcache
F: drivers/md/bcache/
+BCACHEFS
+M: Kent Overstreet <kent.overstreet@linux.dev>
+R: Brian Foster <bfoster@redhat.com>
+L: linux-bcachefs@vger.kernel.org
+S: Supported
+C: irc://irc.oftc.net/bcache
+F: fs/bcachefs/
+
BDISP ST MEDIA DRIVER
M: Fabien Dessenne <fabien.dessenne@foss.st.com>
L: linux-media@vger.kernel.org
@@ -5067,6 +5076,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
F: Documentation/devicetree/bindings/timer/
F: drivers/clocksource/
+CLOSURES
+M: Kent Overstreet <kent.overstreet@linux.dev>
+L: linux-bcachefs@vger.kernel.org
+S: Supported
+C: irc://irc.oftc.net/bcache
+F: include/linux/closure.h
+F: lib/closure.c
+
CMPC ACPI DRIVER
M: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
M: Daniel Oliveira Nascimento <don@syst.com.br>
@@ -6765,7 +6782,7 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7701.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M: Guido Günther <agx@sigxcpu.org>
R: Purism Kernel Team <kernel@puri.sm>
-R: Ondrej Jirman <megous@megous.com>
+R: Ondrej Jirman <megi@xff.cz>
S: Maintained
F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
F: drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -8747,6 +8764,13 @@ S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
F: drivers/pmdomain/
+GENERIC RADIX TREE
+M: Kent Overstreet <kent.overstreet@linux.dev>
+S: Supported
+C: irc://irc.oftc.net/bcache
+F: include/linux/generic-radix-tree.h
+F: lib/generic-radix-tree.c
+
GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
L: linux-input@vger.kernel.org
@@ -10700,7 +10724,7 @@ F: drivers/mfd/intel-m10-bmc*
F: include/linux/mfd/intel-m10-bmc.h
INTEL MAX10 BMC SECURE UPDATES
-M: Russ Weight <russell.h.weight@intel.com>
+M: Peter Colberg <peter.colberg@intel.com>
L: linux-fpga@vger.kernel.org
S: Maintained
F: Documentation/ABI/testing/sysfs-driver-intel-m10-bmc-sec-update
@@ -13845,9 +13869,10 @@ F: Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
F: drivers/staging/media/meson/vdec/
METHODE UDPU SUPPORT
-M: Vladimir Vid <vladimir.vid@sartura.hr>
+M: Robert Marko <robert.marko@sartura.hr>
S: Maintained
-F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts
+F: arch/arm64/boot/dts/marvell/armada-3720-eDPU.dts
+F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.*
MHI BUS
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -15130,7 +15155,7 @@ NOLIBC HEADER FILE
M: Willy Tarreau <w@1wt.eu>
M: Thomas Weißschuh <linux@weissschuh.net>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nolibc/linux-nolibc.git
F: tools/include/nolibc/
F: tools/testing/selftests/nolibc/
@@ -23713,6 +23738,13 @@ F: Documentation/devicetree/bindings/media/xilinx/
F: drivers/media/platform/xilinx/
F: include/uapi/linux/xilinx-v4l2-controls.h
+XILINX VERSAL EDAC DRIVER
+M: Shubhrajyoti Datta <shubhrajyoti.datta@amd.com>
+M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+S: Maintained
+F: Documentation/devicetree/bindings/memory-controllers/xlnx,versal-ddrmc-edac.yaml
+F: drivers/edac/versal_edac.c
+
XILINX WATCHDOG DRIVER
M: Srinivas Neeli <srinivas.neeli@amd.com>
R: Shubhrajyoti Datta <shubhrajyoti.datta@amd.com>
diff --git a/Makefile b/Makefile
index a3e52e10840e..5c418efbe89b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -1474,7 +1474,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache rust/test rust/doc \
+ compile_commands.json .thinlto-cache rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
# Directories & files removed with 'make mrproper'
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index d5b3ed2c58f5..c380d8c30704 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -90,10 +90,12 @@ static void show_faulting_vma(unsigned long address)
*/
if (vma) {
char buf[ARC_PATH_MAX];
- char *nm = "?";
+ char *nm = "anon";
if (vma->vm_file) {
- nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
+ /* XXX: can we use %pD below and get rid of buf? */
+ nm = d_path(file_user_path(vma->vm_file), buf,
+ ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
diff --git a/arch/arm/boot/dts/rockchip/rk3128.dtsi b/arch/arm/boot/dts/rockchip/rk3128.dtsi
index b63bd4ad3143..88a4b0d6d928 100644
--- a/arch/arm/boot/dts/rockchip/rk3128.dtsi
+++ b/arch/arm/boot/dts/rockchip/rk3128.dtsi
@@ -64,7 +64,8 @@
compatible = "arm,armv7-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
arm,cpu-registers-not-fw-configured;
clock-frequency = <24000000>;
};
@@ -233,7 +234,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044000 0x20>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER0>;
clock-names = "pclk", "timer";
};
@@ -241,7 +242,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044020 0x20>;
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER1>;
clock-names = "pclk", "timer";
};
@@ -249,7 +250,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044040 0x20>;
interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER2>;
clock-names = "pclk", "timer";
};
@@ -257,7 +258,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044060 0x20>;
interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER3>;
clock-names = "pclk", "timer";
};
@@ -265,7 +266,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x20044080 0x20>;
interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER4>;
clock-names = "pclk", "timer";
};
@@ -273,7 +274,7 @@
compatible = "rockchip,rk3128-timer", "rockchip,rk3288-timer";
reg = <0x200440a0 0x20>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru PCLK_TIMER>, <&xin24m>;
+ clocks = <&cru PCLK_TIMER>, <&cru SCLK_TIMER5>;
clock-names = "pclk", "timer";
};
@@ -426,7 +427,7 @@
i2c0: i2c@20072000 {
compatible = "rockchip,rk3128-i2c", "rockchip,rk3288-i2c";
- reg = <20072000 0x1000>;
+ reg = <0x20072000 0x1000>;
interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "i2c";
clocks = <&cru PCLK_I2C0>;
@@ -458,6 +459,7 @@
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
arm,pl330-broken-no-flushp;
+ arm,pl330-periph-burst;
clocks = <&cru ACLK_DMAC>;
clock-names = "apb_pclk";
#dma-cells = <1>;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
index 7ae8b620515c..59f546a278f8 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-l4-abe.dtsi
@@ -109,6 +109,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49022000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP4_MCBSP1_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -142,6 +144,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49024000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP4_MCBSP2_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -175,6 +179,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49026000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP4_MCBSP3_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
diff --git a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
index 46b8f9efd413..3fcef3080eae 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-l4.dtsi
@@ -2043,6 +2043,8 @@
compatible = "ti,omap4-mcbsp";
reg = <0x0 0xff>; /* L4 Interconnect */
reg-names = "mpu";
+ clocks = <&l4_per_clkctrl OMAP4_MCBSP4_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
diff --git a/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi b/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
index a03bca5a3584..97b0c3b5f573 100644
--- a/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap5-l4-abe.dtsi
@@ -109,6 +109,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49022000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP5_MCBSP1_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -142,6 +144,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49024000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP5_MCBSP2_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
@@ -175,6 +179,8 @@
reg = <0x0 0xff>, /* MPU private access */
<0x49026000 0xff>; /* L3 Interconnect */
reg-names = "mpu", "dma";
+ clocks = <&abe_clkctrl OMAP5_MCBSP3_CLKCTRL 24>;
+ clock-names = "fck";
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "common";
ti,buffer-size = <128>;
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 9808cd27e2cf..67de96c7717d 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -550,6 +550,7 @@ static struct platform_device *ams_delta_devices[] __initdata = {
&ams_delta_nand_device,
&ams_delta_lcd_device,
&cx20442_codec_device,
+ &modem_nreset_device,
};
static struct gpiod_lookup_table *ams_delta_gpio_tables[] __initdata = {
@@ -782,26 +783,28 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = {
{ },
};
+static int ams_delta_modem_pm_activate(struct device *dev)
+{
+ modem_priv.regulator = regulator_get(dev, "RESET#");
+ if (IS_ERR(modem_priv.regulator))
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
+static struct dev_pm_domain ams_delta_modem_pm_domain = {
+ .activate = ams_delta_modem_pm_activate,
+};
+
static struct platform_device ams_delta_modem_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM1,
.dev = {
.platform_data = ams_delta_modem_ports,
+ .pm_domain = &ams_delta_modem_pm_domain,
},
};
-static int __init modem_nreset_init(void)
-{
- int err;
-
- err = platform_device_register(&modem_nreset_device);
- if (err)
- pr_err("Couldn't register the modem regulator device\n");
-
- return err;
-}
-
-
/*
* This function expects MODEM IRQ number already assigned to the port.
* The MODEM device requires its RESET# pin kept high during probe.
@@ -833,37 +836,6 @@ static int __init ams_delta_modem_init(void)
}
arch_initcall_sync(ams_delta_modem_init);
-static int __init late_init(void)
-{
- int err;
-
- err = modem_nreset_init();
- if (err)
- return err;
-
- /*
- * Once the modem device is registered, the modem_nreset
- * regulator can be requested on behalf of that device.
- */
- modem_priv.regulator = regulator_get(&ams_delta_modem_device.dev,
- "RESET#");
- if (IS_ERR(modem_priv.regulator)) {
- err = PTR_ERR(modem_priv.regulator);
- goto unregister;
- }
- return 0;
-
-unregister:
- platform_device_unregister(&ams_delta_modem_device);
- return err;
-}
-
-static void __init ams_delta_init_late(void)
-{
- omap1_init_late();
- late_init();
-}
-
static void __init ams_delta_map_io(void)
{
omap1_map_io();
@@ -877,7 +849,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
.init_early = omap1_init_early,
.init_irq = omap1_init_irq,
.init_machine = ams_delta_init,
- .init_late = ams_delta_init_late,
+ .init_late = omap1_init_late,
.init_time = omap1_timer_init,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 410d17d1d443..f618a6df2938 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -176,17 +176,18 @@ static u64 notrace omap_32k_read_sched_clock(void)
return sync32k_cnt_reg ? readl_relaxed(sync32k_cnt_reg) : 0;
}
+static struct timespec64 persistent_ts;
+static cycles_t cycles;
+static unsigned int persistent_mult, persistent_shift;
+
/**
* omap_read_persistent_clock64 - Return time from a persistent clock.
+ * @ts: &struct timespec64 for the returned time
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Convert the cycles elapsed since last read into
* nsecs and adds to a monotonically increasing timespec64.
*/
-static struct timespec64 persistent_ts;
-static cycles_t cycles;
-static unsigned int persistent_mult, persistent_shift;
-
static void omap_read_persistent_clock64(struct timespec64 *ts)
{
unsigned long long nsecs;
@@ -206,10 +207,9 @@ static void omap_read_persistent_clock64(struct timespec64 *ts)
/**
* omap_init_clocksource_32k - setup and register counter 32k as a
* kernel clocksource
- * @pbase: base addr of counter_32k module
- * @size: size of counter_32k to map
+ * @vbase: base addr of counter_32k module
*
- * Returns 0 upon success or negative error code upon failure.
+ * Returns: %0 upon success or negative error code upon failure.
*
*/
static int __init omap_init_clocksource_32k(void __iomem *vbase)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 1e17b5f77588..ba71928c0fcb 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2209,7 +2209,7 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
return err;
pr_debug("omap_hwmod: %s %pOFn at %pR\n",
- oh->name, np, &res);
+ oh->name, np, res);
if (oh && oh->mpu_rt_idx) {
omap_hwmod_fix_mpu_rt_idx(oh, np, res);
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
index 385b178314db..3067a4091a7a 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dts
@@ -62,25 +62,23 @@
stdout-path = "serial0:115200n8";
};
- clocks {
- divclk4: divclk4 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "divclk4";
+ div1_mclk: divclk1 {
+ compatible = "gpio-gate-clock";
+ pinctrl-0 = <&audio_mclk>;
+ pinctrl-names = "default";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+ #clock-cells = <0>;
+ enable-gpios = <&pm8994_gpios 15 0>;
+ };
- pinctrl-names = "default";
- pinctrl-0 = <&divclk4_pin_a>;
- };
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
- div1_mclk: divclk1 {
- compatible = "gpio-gate-clock";
- pinctrl-0 = <&audio_mclk>;
- pinctrl-names = "default";
- clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
- #clock-cells = <0>;
- enable-gpios = <&pm8994_gpios 15 0>;
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
index bcd2397eb373..06f8ff624181 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-common.dtsi
@@ -11,26 +11,24 @@
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
- clocks {
- divclk1_cdc: divclk1 {
- compatible = "gpio-gate-clock";
- clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
- #clock-cells = <0>;
- enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
+ divclk1_cdc: divclk1 {
+ compatible = "gpio-gate-clock";
+ clocks = <&rpmcc RPM_SMD_DIV_CLK1>;
+ #clock-cells = <0>;
+ enable-gpios = <&pm8994_gpios 15 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&divclk1_default>;
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk1_default>;
+ };
- divclk4: divclk4 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "divclk4";
+ divclk4: divclk4 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk4";
- pinctrl-names = "default";
- pinctrl-0 = <&divclk4_pin_a>;
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk4_pin_a>;
};
gpio-keys {
diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
index d1066edaea47..f8e9d90afab0 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
+++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts
@@ -20,16 +20,14 @@
qcom,pmic-id = <0x20009 0x2000a 0x00 0x00>;
qcom,board-id = <31 0>;
- clocks {
- divclk2_haptics: divclk2 {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <32768>;
- clock-output-names = "divclk2";
-
- pinctrl-names = "default";
- pinctrl-0 = <&divclk2_pin_a>;
- };
+ divclk2_haptics: divclk2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "divclk2";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&divclk2_pin_a>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
index 3c3b6287cd27..eaa43f022a65 100644
--- a/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8775p-pmics.dtsi
@@ -173,7 +173,7 @@
compatible = "qcom,pmm8654au-gpio", "qcom,spmi-gpio";
reg = <0x8800>;
gpio-controller;
- gpio-ranges = <&pmm8654au_2_gpios 0 0 12>;
+ gpio-ranges = <&pmm8654au_1_gpios 0 0 12>;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
index 08a3ad3e7ae9..de0a1f2af983 100644
--- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts
@@ -68,15 +68,17 @@
simple-audio-card,format = "i2s";
simple-audio-card,name = "Haikou,I2S-codec";
simple-audio-card,mclk-fs = <512>;
+ simple-audio-card,frame-master = <&sgtl5000_codec>;
+ simple-audio-card,bitclock-master = <&sgtl5000_codec>;
- simple-audio-card,codec {
- clocks = <&sgtl5000_clk>;
+ sgtl5000_codec: simple-audio-card,codec {
sound-dai = <&sgtl5000>;
+ // Prevent the dai subsystem from overwriting the clock
+ // frequency. We are using a fixed-frequency oscillator.
+ system-clock-fixed;
};
simple-audio-card,cpu {
- bitclock-master;
- frame-master;
sound-dai = <&i2s0_8ch>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 7dccbe8a9393..f2279aa6ca9e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -492,6 +492,7 @@
&i2s0 {
pinctrl-0 = <&i2s0_2ch_bus>;
+ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
rockchip,capture-channels = <2>;
rockchip,playback-channels = <2>;
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 9da0b6d77c8d..5bc2d4faeea6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -2457,6 +2457,16 @@
<4 RK_PA0 1 &pcfg_pull_none>;
};
+ i2s0_2ch_bus_bclk_off: i2s0-2ch-bus-bclk-off {
+ rockchip,pins =
+ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
+ <3 RK_PD1 1 &pcfg_pull_none>,
+ <3 RK_PD2 1 &pcfg_pull_none>,
+ <3 RK_PD3 1 &pcfg_pull_none>,
+ <3 RK_PD7 1 &pcfg_pull_none>,
+ <4 RK_PA0 1 &pcfg_pull_none>;
+ };
+
i2s0_8ch_bus: i2s0-8ch-bus {
rockchip,pins =
<3 RK_PD0 1 &pcfg_pull_none>,
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5882b2415596..1095c6647e96 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -344,14 +344,14 @@
*/
#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51))
#define __HFGRTR_EL2_MASK GENMASK(49, 0)
-#define __HFGRTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+#define __HFGRTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \
BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
GENMASK(26, 25) | BIT(21) | BIT(18) | \
GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
#define __HFGWTR_EL2_MASK GENMASK(49, 0)
-#define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+#define __HFGWTR_EL2_nMASK (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
#define __HFGITR_EL2_RES0 GENMASK(63, 57)
#define __HFGITR_EL2_MASK GENMASK(54, 0)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 6dcdae4d38cb..a1e24228aaaa 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -55,11 +55,6 @@ static struct irq_ops arch_timer_irq_ops = {
.get_input_level = kvm_arch_timer_get_input_level,
};
-static bool has_cntpoff(void)
-{
- return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
-}
-
static int nr_timers(struct kvm_vcpu *vcpu)
{
if (!vcpu_has_nv(vcpu))
@@ -180,7 +175,7 @@ u64 kvm_phys_timer_read(void)
return timecounter->cc->read(timecounter->cc);
}
-static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
if (vcpu_has_nv(vcpu)) {
if (is_hyp_ctxt(vcpu)) {
@@ -548,8 +543,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
cval = read_sysreg_el0(SYS_CNTP_CVAL);
- if (!has_cntpoff())
- cval -= timer_get_offset(ctx);
+ cval -= timer_get_offset(ctx);
timer_set_cval(ctx, cval);
@@ -636,8 +630,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
cval = timer_get_cval(ctx);
offset = timer_get_offset(ctx);
set_cntpoff(offset);
- if (!has_cntpoff())
- cval += offset;
+ cval += offset;
write_sysreg_el0(cval, SYS_CNTP_CVAL);
isb();
write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index 9ced1bf0c2b7..ee902ff2a50f 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -977,6 +977,8 @@ enum fg_filter_id {
static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
/* HFGRTR_EL2, HFGWTR_EL2 */
+ SR_FGT(SYS_PIR_EL1, HFGxTR, nPIR_EL1, 0),
+ SR_FGT(SYS_PIRE0_EL1, HFGxTR, nPIRE0_EL1, 0),
SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 6537f58b1a8c..448b17080d36 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -39,6 +39,26 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
___activate_traps(vcpu);
+ if (has_cntpoff()) {
+ struct timer_map map;
+
+ get_timer_map(vcpu, &map);
+
+ /*
+ * We're entering the guest. Reload the correct
+ * values from memory now that TGE is clear.
+ */
+ if (map.direct_ptimer == vcpu_ptimer(vcpu))
+ val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+ if (map.direct_ptimer == vcpu_hptimer(vcpu))
+ val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
+
+ if (map.direct_ptimer) {
+ write_sysreg_el0(val, SYS_CNTP_CVAL);
+ isb();
+ }
+ }
+
val = read_sysreg(cpacr_el1);
val |= CPACR_ELx_TTA;
val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
@@ -77,6 +97,30 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ if (has_cntpoff()) {
+ struct timer_map map;
+ u64 val, offset;
+
+ get_timer_map(vcpu, &map);
+
+ /*
+ * We're exiting the guest. Save the latest CVAL value
+ * to memory and apply the offset now that TGE is set.
+ */
+ val = read_sysreg_el0(SYS_CNTP_CVAL);
+ if (map.direct_ptimer == vcpu_ptimer(vcpu))
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+ if (map.direct_ptimer == vcpu_hptimer(vcpu))
+ __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+
+ offset = read_sysreg_s(SYS_CNTPOFF_EL2);
+
+ if (map.direct_ptimer && offset) {
+ write_sysreg_el0(val + offset, SYS_CNTP_CVAL);
+ isb();
+ }
+ }
+
/*
* ARM errata 1165522 and 1530923 require the actual execution of the
* above before we can switch to the EL2/EL0 translation regime used by
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 0eea225fd09a..a243934c5568 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -39,7 +39,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
+ if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
return;
if (!attr->exclude_host)
@@ -55,7 +55,7 @@ void kvm_clr_pmu_events(u32 clr)
{
struct kvm_pmu_events *pmu = kvm_get_pmu_events();
- if (!kvm_arm_support_pmu_v3() || !pmu)
+ if (!kvm_arm_support_pmu_v3())
return;
pmu->events_host &= ~clr;
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e92ec810d449..0afd6136e275 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2122,8 +2122,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
- { SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
- { SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
+ { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
+ { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index 0dcb36b32cb2..c486c2341b66 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -52,10 +52,9 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
* @offset: bus address of the memory
* @size: size of the resource to map
*/
-extern pgprot_t pgprot_wc;
-
#define ioremap_wc(offset, size) \
- ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
+ ioremap_prot((offset), (size), \
+ pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
#define ioremap_cache(offset, size) \
ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
diff --git a/arch/loongarch/include/asm/linkage.h b/arch/loongarch/include/asm/linkage.h
index 81b0c4cfbf4f..e2eca1a25b4e 100644
--- a/arch/loongarch/include/asm/linkage.h
+++ b/arch/loongarch/include/asm/linkage.h
@@ -33,4 +33,12 @@
.cfi_endproc; \
SYM_END(name, SYM_T_FUNC)
+#define SYM_CODE_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) \
+ .cfi_startproc;
+
+#define SYM_CODE_END(name) \
+ .cfi_endproc; \
+ SYM_END(name, SYM_T_NONE)
+
#endif
diff --git a/arch/loongarch/include/asm/pgtable-bits.h b/arch/loongarch/include/asm/pgtable-bits.h
index 35348d4c4209..21319c1e045c 100644
--- a/arch/loongarch/include/asm/pgtable-bits.h
+++ b/arch/loongarch/include/asm/pgtable-bits.h
@@ -105,13 +105,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
return __pgprot(prot);
}
+extern bool wc_enabled;
+
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);
- prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
+ prot = (prot & ~_CACHE_MASK) | (wc_enabled ? _CACHE_WUC : _CACHE_SUC);
return __pgprot(prot);
}
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index 65518bb8f472..1ec8e4c4cc2b 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -18,7 +18,7 @@
.text
.cfi_sections .debug_frame
.align 5
-SYM_FUNC_START(handle_syscall)
+SYM_CODE_START(handle_syscall)
csrrd t0, PERCPU_BASE_KS
la.pcrel t1, kernelsp
add.d t1, t1, t0
@@ -71,7 +71,7 @@ SYM_FUNC_START(handle_syscall)
bl do_syscall
RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_syscall)
+SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)
SYM_CODE_START(ret_from_fork)
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 78f066384657..2bb3aa2dcfcb 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -31,7 +31,7 @@ SYM_FUNC_START(__arch_cpu_idle)
1: jr ra
SYM_FUNC_END(__arch_cpu_idle)
-SYM_FUNC_START(handle_vint)
+SYM_CODE_START(handle_vint)
BACKUP_T0T1
SAVE_ALL
la_abs t1, __arch_cpu_idle
@@ -46,11 +46,11 @@ SYM_FUNC_START(handle_vint)
la_abs t0, do_vint
jirl ra, t0, 0
RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_vint)
+SYM_CODE_END(handle_vint)
-SYM_FUNC_START(except_vec_cex)
+SYM_CODE_START(except_vec_cex)
b cache_parity_error
-SYM_FUNC_END(except_vec_cex)
+SYM_CODE_END(except_vec_cex)
.macro build_prep_badv
csrrd t0, LOONGARCH_CSR_BADV
@@ -66,7 +66,7 @@ SYM_FUNC_END(except_vec_cex)
.macro BUILD_HANDLER exception handler prep
.align 5
- SYM_FUNC_START(handle_\exception)
+ SYM_CODE_START(handle_\exception)
666:
BACKUP_T0T1
SAVE_ALL
@@ -76,7 +76,7 @@ SYM_FUNC_END(except_vec_cex)
jirl ra, t0, 0
668:
RESTORE_ALL_AND_RET
- SYM_FUNC_END(handle_\exception)
+ SYM_CODE_END(handle_\exception)
SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
.endm
@@ -93,7 +93,7 @@ SYM_FUNC_END(except_vec_cex)
BUILD_HANDLER watch watch none
BUILD_HANDLER reserved reserved none /* others */
-SYM_FUNC_START(handle_sys)
+SYM_CODE_START(handle_sys)
la_abs t0, handle_syscall
jr t0
-SYM_FUNC_END(handle_sys)
+SYM_CODE_END(handle_sys)
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 7783f0a3d742..aed65915e932 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -161,19 +161,19 @@ static void __init smbios_parse(void)
}
#ifdef CONFIG_ARCH_WRITECOMBINE
-pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
+bool wc_enabled = true;
#else
-pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
+bool wc_enabled = false;
#endif
-EXPORT_SYMBOL(pgprot_wc);
+EXPORT_SYMBOL(wc_enabled);
static int __init setup_writecombine(char *p)
{
if (!strcmp(p, "on"))
- pgprot_wc = PAGE_KERNEL_WUC;
+ wc_enabled = true;
else if (!strcmp(p, "off"))
- pgprot_wc = PAGE_KERNEL_SUC;
+ wc_enabled = false;
else
pr_warn("Unknown writecombine setting \"%s\".\n", p);
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index f3fe8c06ba4d..4dd53427f657 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -43,11 +43,11 @@ void copy_user_highpage(struct page *to, struct page *from,
{
void *vfrom, *vto;
- vto = kmap_atomic(to);
- vfrom = kmap_atomic(from);
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
copy_page(vto, vfrom);
- kunmap_atomic(vfrom);
- kunmap_atomic(vto);
+ kunmap_local(vfrom);
+ kunmap_local(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
@@ -240,6 +240,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
index ca17dd3a1915..d5d682f3d29f 100644
--- a/arch/loongarch/mm/tlbex.S
+++ b/arch/loongarch/mm/tlbex.S
@@ -17,7 +17,7 @@
#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
.macro tlb_do_page_fault, write
- SYM_FUNC_START(tlb_do_page_fault_\write)
+ SYM_CODE_START(tlb_do_page_fault_\write)
SAVE_ALL
csrrd a2, LOONGARCH_CSR_BADV
move a0, sp
@@ -25,13 +25,13 @@
li.w a1, \write
bl do_page_fault
RESTORE_ALL_AND_RET
- SYM_FUNC_END(tlb_do_page_fault_\write)
+ SYM_CODE_END(tlb_do_page_fault_\write)
.endm
tlb_do_page_fault 0
tlb_do_page_fault 1
-SYM_FUNC_START(handle_tlb_protect)
+SYM_CODE_START(handle_tlb_protect)
BACKUP_T0T1
SAVE_ALL
move a0, sp
@@ -41,9 +41,9 @@ SYM_FUNC_START(handle_tlb_protect)
la_abs t0, do_page_fault
jirl ra, t0, 0
RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_tlb_protect)
+SYM_CODE_END(handle_tlb_protect)
-SYM_FUNC_START(handle_tlb_load)
+SYM_CODE_START(handle_tlb_load)
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -187,16 +187,16 @@ nopage_tlb_load:
csrrd ra, EXCEPTION_KS2
la_abs t0, tlb_do_page_fault_0
jr t0
-SYM_FUNC_END(handle_tlb_load)
+SYM_CODE_END(handle_tlb_load)
-SYM_FUNC_START(handle_tlb_load_ptw)
+SYM_CODE_START(handle_tlb_load_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_0
jr t0
-SYM_FUNC_END(handle_tlb_load_ptw)
+SYM_CODE_END(handle_tlb_load_ptw)
-SYM_FUNC_START(handle_tlb_store)
+SYM_CODE_START(handle_tlb_store)
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -343,16 +343,16 @@ nopage_tlb_store:
csrrd ra, EXCEPTION_KS2
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_store)
+SYM_CODE_END(handle_tlb_store)
-SYM_FUNC_START(handle_tlb_store_ptw)
+SYM_CODE_START(handle_tlb_store_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_store_ptw)
+SYM_CODE_END(handle_tlb_store_ptw)
-SYM_FUNC_START(handle_tlb_modify)
+SYM_CODE_START(handle_tlb_modify)
csrwr t0, EXCEPTION_KS0
csrwr t1, EXCEPTION_KS1
csrwr ra, EXCEPTION_KS2
@@ -497,16 +497,16 @@ nopage_tlb_modify:
csrrd ra, EXCEPTION_KS2
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_modify)
+SYM_CODE_END(handle_tlb_modify)
-SYM_FUNC_START(handle_tlb_modify_ptw)
+SYM_CODE_START(handle_tlb_modify_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la_abs t0, tlb_do_page_fault_1
jr t0
-SYM_FUNC_END(handle_tlb_modify_ptw)
+SYM_CODE_END(handle_tlb_modify_ptw)
-SYM_FUNC_START(handle_tlb_refill)
+SYM_CODE_START(handle_tlb_refill)
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_PGD
lddir t0, t0, 3
@@ -521,4 +521,4 @@ SYM_FUNC_START(handle_tlb_refill)
tlbfill
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
-SYM_FUNC_END(handle_tlb_refill)
+SYM_CODE_END(handle_tlb_refill)
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 7b2ac1319d70..467ee6b95ae1 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -592,7 +592,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
- pte_t *ptep, entry, old_pte;
+ pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
@@ -664,7 +664,6 @@ retry:
entry = pfn_pte(pfn, __pgprot(prot_bits));
/* Write the PTE */
- old_pte = *ptep;
set_pte(ptep, entry);
err = 0;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3aaadfd2c8eb..d5d5388973ac 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -910,7 +910,7 @@ config ARCH_FORCE_MAX_ORDER
default "6" if PPC32 && PPC_64K_PAGES
range 4 10 if PPC32 && PPC_256K_PAGES
default "4" if PPC32 && PPC_256K_PAGES
- range 10 10
+ range 10 12
default "10"
help
The kernel page allocator limits the size of maximal physically
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 2f1026fba00d..20f72cd1d813 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -948,6 +948,8 @@ void __init setup_arch(char **cmdline_p)
/* Parse memory topology */
mem_topology_setup();
+ /* Set max_mapnr before paging_init() */
+ set_max_mapnr(max_pfn);
/*
* Release secondary cpus out of their spinloops at 0x60 now that
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 253620979d0c..6dd2f46bd3ef 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
if ((yield_count & 1) == 0)
goto yield_prev; /* owner vcpu is running */
+ if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
+ goto yield_prev; /* re-sample lock owner */
+
spin_end();
preempted = true;
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 39acc2cbab4c..9e1f6558d026 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1212,14 +1212,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
smp_mb(); /* see radix__flush_tlb_mm */
exit_flush_lazy_tlbs(mm);
- _tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
-
- /*
- * It should not be possible to have coprocessors still
- * attached here.
- */
- if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
- __flush_all_mm(mm, true);
+ __flush_all_mm(mm, true);
preempt_enable();
} else {
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8b121df7b08f..07e8f4f1e07f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -288,7 +288,6 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- set_max_mapnr(max_pfn);
kasan_late_init();
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 3ba9fe411604..4d69bfb9bc11 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -104,6 +104,8 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
/* Embedded type MMU with HW exec support. This is a bit more complicated
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
+ *
+ * This is also called once for the folio. So only work with folio->flags here.
*/
static inline pte_t set_pte_filter(pte_t pte)
{
@@ -190,29 +192,39 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte, unsigned int nr)
{
- /*
- * Make sure hardware valid bit is not set. We don't do
- * tlb flush for this update.
- */
- VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
- * is called.
+ * is called. Filter the pte value and use the filtered value
+ * to setup all the ptes in the range.
*/
pte = set_pte_filter(pte);
- /* Perform the setting of the PTE */
- arch_enter_lazy_mmu_mode();
+ /*
+ * We don't need to call arch_enter/leave_lazy_mmu_mode()
+ * because we expect set_ptes to only be used on not present
+ * and not hw_valid ptes. Hence there is no translation cache flush
+ * involved that needs to be batched.
+ */
for (;;) {
+
+ /*
+ * Make sure hardware valid bit is not set. We don't do
+ * tlb flush for this update.
+ */
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+ /* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
if (--nr == 0)
break;
ptep++;
- pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
addr += PAGE_SIZE;
+ /*
+ * increment the pfn.
+ */
+ pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte)));
}
- arch_leave_lazy_mmu_mode();
}
void unmap_kernel_page(unsigned long va)
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 1a587618015c..18daafbe2e65 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -66,7 +66,7 @@ static int match_context(const void *v, struct file *file, unsigned fd)
*/
static struct spu_context *coredump_next_context(int *fd)
{
- struct spu_context *ctx;
+ struct spu_context *ctx = NULL;
struct file *file;
int n = iterate_fd(current->files, *fd, match_context, NULL);
if (!n)
@@ -74,10 +74,13 @@ static struct spu_context *coredump_next_context(int *fd)
*fd = n - 1;
rcu_read_lock();
- file = lookup_fd_rcu(*fd);
- ctx = SPUFS_I(file_inode(file))->i_ctx;
- get_spu_context(ctx);
+ file = lookup_fdget_rcu(*fd);
rcu_read_unlock();
+ if (file) {
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
+ get_spu_context(ctx);
+ fput(file);
+ }
return ctx;
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 38c5be34c895..10c1320adfd0 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -86,7 +86,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
out:
return inode;
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index d607ab0f7c6d..9c48fecc6719 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -273,11 +273,9 @@ config RISCV_DMA_NONCOHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB
- select DMA_DIRECT_REMAP if MMU
config RISCV_NONSTANDARD_CACHE_OPS
bool
- depends on RISCV_DMA_NONCOHERENT
help
This enables function pointer support for non-standard noncoherent
systems to handle cache management.
@@ -550,6 +548,7 @@ config RISCV_ISA_ZICBOM
depends on RISCV_ALTERNATIVE
default y
select RISCV_DMA_NONCOHERENT
+ select DMA_DIRECT_REMAP
help
Adds support to dynamically detect the presence of the ZICBOM
extension (Cache Block Management Operations) and enable its
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index 566bcefeab50..e2c731cfed8c 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -77,6 +77,7 @@ config ERRATA_THEAD_PBMT
config ERRATA_THEAD_CMO
bool "Apply T-Head cache management errata"
depends on ERRATA_THEAD && MMU
+ select DMA_DIRECT_REMAP
select RISCV_DMA_NONCOHERENT
default y
help
diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index 12ebe9792356..2c02358abd71 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
@@ -431,7 +431,7 @@
};
ss-pins {
- pinmux = <GPIOMUX(48, GPOUT_SYS_SPI0_FSS,
+ pinmux = <GPIOMUX(49, GPOUT_SYS_SPI0_FSS,
GPOEN_ENABLE,
GPI_SYS_SPI0_FSS)>;
bias-disable;
diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi
index ce708183b6f6..ff364709a6df 100644
--- a/arch/riscv/boot/dts/thead/th1520.dtsi
+++ b/arch/riscv/boot/dts/thead/th1520.dtsi
@@ -139,6 +139,7 @@
interrupt-parent = <&plic>;
#address-cells = <2>;
#size-cells = <2>;
+ dma-noncoherent;
ranges;
plic: interrupt-controller@ffd8000000 {
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 6115d7514972..90d4ba36d1d0 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
}
pagefault_out_of_memory();
return;
- } else if (fault & VM_FAULT_SIGBUS) {
+ } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, addr);
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index e4a2ace92dbe..b52f0210481f 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
pte_t pte,
unsigned long sz)
{
+ unsigned long hugepage_shift;
int i, pte_num;
- if (!pte_napot(pte)) {
- set_pte_at(mm, addr, ptep, pte);
- return;
- }
+ if (sz >= PGDIR_SIZE)
+ hugepage_shift = PGDIR_SHIFT;
+ else if (sz >= P4D_SIZE)
+ hugepage_shift = P4D_SHIFT;
+ else if (sz >= PUD_SIZE)
+ hugepage_shift = PUD_SHIFT;
+ else if (sz >= PMD_SIZE)
+ hugepage_shift = PMD_SHIFT;
+ else
+ hugepage_shift = PAGE_SHIFT;
- pte_num = napot_pte_num(napot_cont_order(pte));
- for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+ pte_num = sz >> hugepage_shift;
+ for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
set_pte_at(mm, addr, ptep, pte);
}
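
To make the shift selection in set_huge_pte_at() concrete, a worked example
(numbers assume riscv64 defaults, PAGE_SHIFT = 12 and PMD_SHIFT = 21;
illustrative only, not part of the patch):

	/*
	 * sz = 2 MiB (PMD_SIZE): hugepage_shift = PMD_SHIFT = 21,
	 *	pte_num = sz >> 21 = 1, so a single PMD-level entry
	 *	is written.
	 *
	 * sz = 64 KiB (a NAPOT contiguous mapping, < PMD_SIZE):
	 *	hugepage_shift = PAGE_SHIFT = 12,
	 *	pte_num = sz >> 12 = 16, so sixteen base PTEs are
	 *	written, addr advancing 4 KiB per iteration.
	 */
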
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 01257ce3b89c..442a74f113cb 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -57,6 +57,7 @@ static void kasan_populate_shadow(void)
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+ unsigned long memgap_start = 0;
unsigned long untracked_end;
unsigned long start, end;
int i;
@@ -101,8 +102,12 @@ static void kasan_populate_shadow(void)
* +- shadow end ----+---------+- shadow end ---+
*/
- for_each_physmem_usable_range(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end) {
kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
+ if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
+ kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
+ memgap_start = end;
+ }
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
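
The gap handling in kasan_populate_shadow() is easiest to see on an example.
Assume two usable ranges [0, a) and [b, c) reported via DIAG260 (hypothetical
values, for illustration):

	/*
	 * iteration 1: start = 0, end = a
	 *	kasan_populate(0, a, POPULATE_KASAN_MAP_SHADOW);
	 *	memgap_start is still 0, so no zero shadow yet;
	 *	memgap_start = a;
	 *
	 * iteration 2: start = b, end = c
	 *	kasan_populate(b, c, POPULATE_KASAN_MAP_SHADOW);
	 *	kasan_populate(a, b, POPULATE_KASAN_ZERO_SHADOW);
	 *	memgap_start = c;
	 */
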
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index ada83149932f..858beaf4a8cb 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -53,7 +53,7 @@ static void hypfs_update_update(struct super_block *sb)
struct inode *inode = d_inode(sb_info->update_file);
sb_info->last_update = ktime_get_seconds();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
/* directory tree removal functions */
@@ -101,7 +101,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
ret->i_mode = mode;
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
if (S_ISDIR(mode))
set_nlink(ret, 2);
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c1b47d608a2b..efaebba5ee19 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -303,11 +303,6 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
return 0;
}
-static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
-{
- return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
-}
-
static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
@@ -3216,11 +3211,12 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
if (!gi->origin)
return;
- if (gi->alert.mask)
- KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
- kvm, gi->alert.mask);
- while (gisa_in_alert_list(gi->origin))
- cpu_relax();
+ WARN(gi->alert.mask != 0x00,
+ "unexpected non zero alert.mask 0x%02x",
+ gi->alert.mask);
+ gi->alert.mask = 0x00;
+ if (gisa_set_iam(gi->origin, gi->alert.mask))
+ process_gib_alert_list();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 2d9b01d7ca4c..99209085c75b 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -564,6 +564,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
}
+
+static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
+{
+ size_t n = BITS_TO_LONGS(bits);
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
+ return NULL;
+
+ return vzalloc(bytes);
+}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
@@ -604,13 +615,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->end_dma - zdev->start_dma + 1);
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
- zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto free_dma_table;
}
if (!s390_iommu_strict) {
- zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
goto free_bitmap;
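
The bitmap_vzalloc() helper replaces the open-coded "iommu_pages / 8"
sizing. A quick worked case showing why rounding up to whole longs matters
(hypothetical numbers, assuming 64-bit unsigned long):

	/*
	 * bits = 100:
	 *	BITS_TO_LONGS(100) = (100 + 63) / 64 = 2 longs = 16 bytes,
	 *	whereas 100 / 8 = 12 bytes.
	 *
	 * Bitmap operations read and write whole unsigned longs, so the
	 * truncated size is four bytes short. check_mul_overflow()
	 * additionally guards the longs * sizeof(unsigned long)
	 * multiplication for pathological bit counts.
	 */
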
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 84ad709cbecb..66eda40fce36 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -453,5 +453,5 @@ ccslow: cmp %g1, 0
* we only bother with faults on loads... */
cc_fault:
- ret
+ retl
clr %o0
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index dc8c876fbd8f..80d76aea1f7b 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
return ES_OK;
}
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ return ES_OK;
+}
+
+static bool fault_in_kernel_space(unsigned long address)
+{
+ return false;
+}
+
#undef __init
#define __init
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 31089b851c4f..a2be3aefff9f 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -157,7 +157,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) {
static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
#endif
-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
+extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
+ unsigned int size, u64 xfeatures, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 637fa1df3512..c715097e92fd 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -69,6 +69,8 @@ struct legacy_pic {
void (*make_irq)(unsigned int irq);
};
+void legacy_pic_pcat_compat(void);
+
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5fcd85fd64fd..197316121f04 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -27,6 +27,7 @@
* _X - regular server parts
* _D - micro server parts
* _N,_P - other mobile parts
+ * _H - premium mobile parts
* _S - other client parts
*
* Historical OPTDIFFs:
@@ -124,6 +125,7 @@
#define INTEL_FAM6_METEORLAKE 0xAC
#define INTEL_FAM6_METEORLAKE_L 0xAA
+#define INTEL_FAM6_ARROWLAKE_H 0xC5
#define INTEL_FAM6_ARROWLAKE 0xC6
#define INTEL_FAM6_LUNARLAKE_M 0xBD
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 17715cb8731d..70d139406bc8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -528,7 +528,6 @@ struct kvm_pmu {
u64 raw_event_mask;
struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
- struct irq_work irq_work;
/*
* Overlay the bitmap with a 64-bit atomic so that all bits can be
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 19bf955b67e0..3ac0ffc4f3e2 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -268,6 +268,7 @@ enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
AVIC_IPI_FAILURE_INVALID_TARGET,
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+ AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
};
#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8bae40a66282..5c367c1290c3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -496,7 +496,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel
unsigned long __must_check
-copy_mc_to_user(void *to, const void *from, unsigned len);
+copy_mc_to_user(void __user *to, const void *from, unsigned len);
#endif
/*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2a0ea38955df..c55c0ef47a18 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -148,6 +148,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
pr_debug("Local APIC address 0x%08x\n", madt->address);
}
+ if (madt->flags & ACPI_MADT_PCAT_COMPAT)
+ legacy_pic_pcat_compat();
+
/* ACPI 6.3 and newer support the online capable bit. */
if (acpi_gbl_FADT.header.revision > 6 ||
(acpi_gbl_FADT.header.revision == 6 &&
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index a86d37052a64..a21a4d0ecc34 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
- unsigned int size, u32 pkru)
+ unsigned int size, u64 xfeatures, u32 pkru)
{
struct fpstate *kstate = gfpu->fpstate;
union fpregs_state *ustate = buf;
struct membuf mb = { .p = buf, .left = size };
if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
- __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
+ __copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
+ XSTATE_COPY_XSAVE);
} else {
memcpy(&ustate->fxsave, &kstate->regs.fxsave,
sizeof(ustate->fxsave));
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index cadf68737e6b..ef6906107c54 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1049,6 +1049,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
* __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
* @to: membuf descriptor
* @fpstate: The fpstate buffer from which to copy
+ * @xfeatures: The mask of xfeatures to save (XSAVE mode only)
* @pkru_val: The PKRU value to store in the PKRU component
* @copy_mode: The requested copy mode
*
@@ -1059,7 +1060,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
* It supports partial copy but @to.pos always starts from zero.
*/
void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
- u32 pkru_val, enum xstate_copy_mode copy_mode)
+ u64 xfeatures, u32 pkru_val,
+ enum xstate_copy_mode copy_mode)
{
const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
struct xregs_state *xinit = &init_fpstate.regs.xsave;
@@ -1083,7 +1085,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
break;
case XSTATE_COPY_XSAVE:
- header.xfeatures &= fpstate->user_xfeatures;
+ header.xfeatures &= fpstate->user_xfeatures & xfeatures;
break;
}
@@ -1185,6 +1187,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode copy_mode)
{
__copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
+ tsk->thread.fpu.fpstate->user_xfeatures,
tsk->thread.pkru, copy_mode);
}
@@ -1536,10 +1539,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
fpregs_restore_userregs();
newfps->xfeatures = curfps->xfeatures | xfeatures;
-
- if (!guest_fpu)
- newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
-
+ newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
newfps->xfd = curfps->xfd & ~xfeatures;
/* Do the final updates within the locked region */
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index a4ecb04d8d64..3518fb26d06b 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -43,7 +43,8 @@ enum xstate_copy_mode {
struct membuf;
extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
- u32 pkru_val, enum xstate_copy_mode copy_mode);
+ u64 xfeatures, u32 pkru_val,
+ enum xstate_copy_mode copy_mode);
extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
enum xstate_copy_mode mode);
extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 30a55207c000..c20d1832c481 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,6 +32,7 @@
*/
static void init_8259A(int auto_eoi);
+static bool pcat_compat __ro_after_init;
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);
@@ -299,15 +300,32 @@ static void unmask_8259A(void)
static int probe_8259A(void)
{
+ unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
unsigned long flags;
- unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
- unsigned char new_val;
+
+ /*
+ * If MADT has the PCAT_COMPAT flag set, then do not bother probing
+ * for the PIC. Some BIOSes leave the PIC uninitialized and probing
+ * fails.
+ *
+ * Right now this causes problems as quite a bit of code depends
+ * on nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is
+ * silly when the system has an IO/APIC, because then the PIC is
+ * not required at all, except on really old machines where the
+ * timer interrupt must be routed through the PIC. So just pretend
+ * the PIC is there and let legacy_pic->init() initialize it for nothing.
+ *
+ * Alternatively this could just try to initialize the PIC and
+ * repeat the probe, but for cases where there is no PIC that's
+ * just pointless.
+ */
+ if (pcat_compat)
+ return nr_legacy_irqs();
+
/*
- * Check to see if we have a PIC.
- * Mask all except the cascade and read
- * back the value we just wrote. If we don't
- * have a PIC, we will read 0xff as opposed to the
- * value we wrote.
+ * Check to see if we have a PIC. Mask all except the cascade and
+ * read back the value we just wrote. If we don't have a PIC, we
+ * will read 0xff as opposed to the value we wrote.
*/
raw_spin_lock_irqsave(&i8259A_lock, flags);
@@ -429,5 +447,9 @@ static int __init i8259A_init_ops(void)
return 0;
}
-
device_initcall(i8259A_init_ops);
+
+void __init legacy_pic_pcat_compat(void)
+{
+ pcat_compat = true;
+}
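
For context on what the PCAT_COMPAT path skips: a sketch of the detection
logic in probe_8259A() (register values taken from the code above; the
walkthrough itself is illustrative):

	/*
	 * probe_val = ~(1 << PIC_CASCADE_IR) = ~(1 << 2) = 0xfb
	 *
	 * Write 0xfb to the master PIC's interrupt mask register and
	 * read it back:
	 *	- a real PIC latches the mask: read returns 0xfb
	 *	- no PIC: the port floats high, read returns 0xff
	 *
	 * An uninitialized PIC can fail this check, which is why the
	 * PCAT_COMPAT flag short-circuits the probe entirely.
	 */
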
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index dcf325b7b022..ccb0915e84e1 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -632,6 +632,23 @@ fail:
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
+static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
+ unsigned long address,
+ bool write)
+{
+ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_USER;
+ ctxt->fi.cr2 = address;
+ if (write)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return ES_EXCEPTION;
+ }
+
+ return ES_OK;
+}
+
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
void *src, char *buf,
unsigned int data_size,
@@ -639,7 +656,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, b = backwards ? -1 : 1;
- enum es_result ret = ES_OK;
+ unsigned long address = (unsigned long)src;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, false);
+ if (ret != ES_OK)
+ return ret;
for (i = 0; i < count; i++) {
void *s = src + (i * data_size * b);
@@ -660,7 +682,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, s = backwards ? -1 : 1;
- enum es_result ret = ES_OK;
+ unsigned long address = (unsigned long)dst;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, true);
+ if (ret != ES_OK)
+ return ret;
for (i = 0; i < count; i++) {
void *d = dst + (i * data_size * s);
@@ -696,6 +723,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
struct insn *insn = &ctxt->insn;
+ size_t size;
+ u64 port;
+
*exitinfo = 0;
switch (insn->opcode.bytes[0]) {
@@ -704,7 +734,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6d:
*exitinfo |= IOIO_TYPE_INS;
*exitinfo |= IOIO_SEG_ES;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* OUTS opcodes */
@@ -712,41 +742,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6f:
*exitinfo |= IOIO_TYPE_OUTS;
*exitinfo |= IOIO_SEG_DS;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* IN immediate opcodes */
case 0xe4:
case 0xe5:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (u8)insn->immediate.value << 16;
+ port = (u8)insn->immediate.value & 0xffff;
break;
/* OUT immediate opcodes */
case 0xe6:
case 0xe7:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (u8)insn->immediate.value << 16;
+ port = (u8)insn->immediate.value & 0xffff;
break;
/* IN register opcodes */
case 0xec:
case 0xed:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* OUT register opcodes */
case 0xee:
case 0xef:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
default:
return ES_DECODE_FAILED;
}
+ *exitinfo |= port << 16;
+
switch (insn->opcode.bytes[0]) {
case 0x6c:
case 0x6e:
@@ -756,12 +788,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0xee:
/* Single byte opcodes */
*exitinfo |= IOIO_DATA_8;
+ size = 1;
break;
default:
/* Length determined by instruction parsing */
*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
: IOIO_DATA_32;
+ size = (insn->opnd_bytes == 2) ? 2 : 4;
}
+
switch (insn->addr_bytes) {
case 2:
*exitinfo |= IOIO_ADDR_16;
@@ -777,7 +812,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
if (insn_has_rep_prefix(insn))
*exitinfo |= IOIO_REP;
- return ES_OK;
+ return vc_ioio_check(ctxt, (u16)port, size);
}
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
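
A worked decode under the refactored vc_ioio_exitinfo() (hypothetical
instruction, for illustration): for "in al, 0x71" the opcode byte is 0xe4,
so the function takes the IN-immediate path:

	/*
	 * opcode 0xe4 (IN immediate, byte sized):
	 *	*exitinfo |= IOIO_TYPE_IN;
	 *	port = (u8)insn->immediate.value & 0xffff;	-> 0x0071
	 *	*exitinfo |= port << 16;	-> port lands in bits 31:16
	 *	*exitinfo |= IOIO_DATA_8; size = 1;
	 *
	 * vc_ioio_check(ctxt, 0x71, 1) then decides whether the access
	 * is permitted before the exit info reaches the hypervisor.
	 */
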
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index d8c1e3be74c0..6395bfd87b68 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -524,6 +524,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
return ES_OK;
}
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ BUG_ON(size > 4);
+
+ if (user_mode(ctxt->regs)) {
+ struct thread_struct *t = &current->thread;
+ struct io_bitmap *iobm = t->io_bitmap;
+ size_t idx;
+
+ if (!iobm)
+ goto fault;
+
+ for (idx = port; idx < port + size; ++idx) {
+ if (test_bit(idx, iobm->bitmap))
+ goto fault;
+ }
+ }
+
+ return ES_OK;
+
+fault:
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+
+ return ES_EXCEPTION;
+}
+
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
@@ -1508,6 +1535,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ES_DECODE_FAILED;
}
+ if (user_mode(ctxt->regs))
+ return ES_UNSUPPORTED;
+
switch (mmio) {
case INSN_MMIO_WRITE:
memcpy(ghcb->shared_buffer, reg_data, bytes);
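
The permission rule vc_ioio_check() enforces, restated as a sketch (a
behavioral summary under my reading of the hunk; a set bit in the task's
I/O bitmap means the port is denied):

	/*
	 * user mode, no io_bitmap attached	-> fault, #GP injected
	 * user mode, any bit set in
	 *	[port, port + size)		-> fault, #GP injected
	 * user mode, all bits clear		-> ES_OK, emulate access
	 * kernel mode				-> ES_OK, no bitmap check
	 *
	 * Because the #VC handler emulates IN/OUT itself, it has to
	 * re-apply the permission check the hardware would otherwise
	 * perform.
	 */
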
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index bbc440c93e08..1123ef3ccf90 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -15,6 +15,7 @@
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
+#include <linux/workqueue.h>
#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
@@ -342,6 +343,13 @@ static inline unsigned int loop_timeout(int cpu)
return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
}
+static void tsc_sync_mark_tsc_unstable(struct work_struct *work)
+{
+ mark_tsc_unstable("check_tsc_sync_source failed");
+}
+
+static DECLARE_WORK(tsc_sync_work, tsc_sync_mark_tsc_unstable);
+
/*
* The freshly booted CPU initiates this via an async SMP function call.
*/
@@ -395,7 +403,7 @@ retry:
"turning off TSC clock.\n", max_warp);
if (random_warps)
pr_warn("TSC warped randomly between CPUs\n");
- mark_tsc_unstable("check_tsc_sync_source failed");
+ schedule_work(&tsc_sync_work);
}
/*
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 0544e30b4946..773132c3bf5a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -360,14 +360,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 =
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
- /*
- * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
- * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
- * supported by the host.
- */
- vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
- XFEATURE_MASK_FPSSE;
-
kvm_update_pv_runtime(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index dcd60b39e794..3e977dbbf993 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2759,13 +2759,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
u32 reg = kvm_lapic_get_reg(apic, lvt_type);
int vector, mode, trig_mode;
+ int r;
if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
vector = reg & APIC_VECTOR_MASK;
mode = reg & APIC_MODE_MASK;
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
- return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
- NULL);
+
+ r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+ if (r && lvt_type == APIC_LVTPC)
+ kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+ return r;
}
return 0;
}
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index edb89b51b383..9ae07db6f0f6 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -93,14 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}
-static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
-{
- struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
- struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
- kvm_pmu_deliver_pmi(vcpu);
-}
-
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -124,20 +116,7 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}
- if (!pmc->intr || skip_pmi)
- return;
-
- /*
- * Inject PMI. If vcpu was in a guest mode during NMI PMI
- * can be ejected on a guest mode re-entry. Otherwise we can't
- * be sure that vcpu wasn't executing hlt instruction at the
- * time of vmexit and is not going to re-enter guest mode until
- * woken up. So we should wake it, but this is impossible from
- * NMI context. Do it from irq work instead.
- */
- if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
- irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
- else
+ if (pmc->intr && !skip_pmi)
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
@@ -675,9 +654,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
- irq_work_sync(&pmu->irq_work);
static_call(kvm_x86_pmu_reset)(vcpu);
}
@@ -687,7 +663,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
memset(pmu, 0, sizeof(*pmu));
static_call(kvm_x86_pmu_init)(vcpu);
- init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
pmu->event_count = 0;
pmu->need_cleanup = false;
kvm_pmu_refresh(vcpu);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7d9ba301c090..1d64113de488 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -74,6 +74,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
return counter & pmc_bitmask(pmc);
}
+static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+ pmc->counter += val - pmc_read_counter(pmc);
+ pmc->counter &= pmc_bitmask(pmc);
+}
+
static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
if (pmc->perf_event) {
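
The "val - pmc_read_counter(pmc)" form in pmc_write_counter() appears to
account for counts the backing perf event has accumulated but not yet folded
into pmc->counter. A worked trace (hypothetical values):

	/*
	 * pmc->counter = 5, perf event delta since last sync = 100:
	 *	pmc_read_counter()	-> (5 + 100) & mask = 105
	 *
	 * guest writes val = 50:
	 *	pmc->counter += 50 - 105  ->  pmc->counter = -50 (mod 2^64)
	 *
	 * next read: (-50 + 100) & mask = 50, exactly what was written.
	 */
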
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 2092db892d7d..4b74ea91f4e6 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -529,8 +529,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
break;
+ case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
+ /* Invalid IPI with vector < 16 */
+ break;
default:
- pr_err("Unknown IPI interception\n");
+ vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
}
return 1;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index dd496c9e5f91..3fea8c47679e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1253,6 +1253,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
nested_svm_uninit_mmu_context(vcpu);
vmcb_mark_all_dirty(svm->vmcb);
+
+ if (kvm_apicv_activated(vcpu->kvm))
+ kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}
kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index cef5a3d0abd0..373ff6a6687b 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -160,7 +160,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* MSR_PERFCTRn */
pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
if (pmc) {
- pmc->counter += data - pmc_read_counter(pmc);
+ pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
return 0;
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9507df93f410..beea99c8e8e0 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -691,7 +691,7 @@ static int svm_hardware_enable(void)
*/
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
struct sev_es_save_area *hostsa;
- u32 msr_hi;
+ u32 __maybe_unused msr_hi;
hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@@ -913,8 +913,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
if (intercept == svm->x2avic_msrs_intercepted)
return;
- if (!x2avic_enabled ||
- !apic_x2apic_mode(svm->vcpu.arch.apic))
+ if (!x2avic_enabled)
return;
for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f2efa0bf7ae8..820d3e1f6b4f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -436,11 +436,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated &&
!(msr & MSR_PMC_FULL_WIDTH_BIT))
data = (s64)(s32)data;
- pmc->counter += data - pmc_read_counter(pmc);
+ pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
break;
} else if ((pmc = get_fixed_pmc(pmu, msr))) {
- pmc->counter += data - pmc_read_counter(pmc);
+ pmc_write_counter(pmc, data);
pmc_update_sample_period(pmc);
break;
} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9f18b06bbda6..41cce5031126 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5382,26 +5382,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
- struct kvm_xsave *guest_xsave)
-{
- if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
- return;
-
- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
- guest_xsave->region,
- sizeof(guest_xsave->region),
- vcpu->arch.pkru);
-}
static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
u8 *state, unsigned int size)
{
+ /*
+ * Only copy state for features that are enabled for the guest. The
+ * state itself isn't problematic, but setting bits in the header for
+ * features that are supported in *this* host but not exposed to the
+ * guest can result in KVM_SET_XSAVE failing when live migrating to a
+ * compatible host without the features that are NOT exposed to the
+ * guest.
+ *
+ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+ * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+ * supported by the host.
+ */
+ u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
+ XFEATURE_MASK_FPSSE;
+
if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
return;
- fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
- state, size, vcpu->arch.pkru);
+ fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
+ supported_xcr0, vcpu->arch.pkru);
+}
+
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
+{
+ return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+ sizeof(guest_xsave->region));
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@ -12843,6 +12854,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
#endif
+ if (kvm_test_request(KVM_REQ_PMI, vcpu))
+ return true;
+
if (kvm_arch_interrupt_allowed(vcpu) &&
(kvm_cpu_has_interrupt(vcpu) ||
kvm_guest_apic_has_interrupt(vcpu)))
diff --git a/arch/x86/lib/copy_mc.c b/arch/x86/lib/copy_mc.c
index 80efd45a7761..6e8b7e600def 100644
--- a/arch/x86/lib/copy_mc.c
+++ b/arch/x86/lib/copy_mc.c
@@ -70,23 +70,23 @@ unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigne
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);
-unsigned long __must_check copy_mc_to_user(void *dst, const void *src, unsigned len)
+unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
unsigned long ret;
if (copy_mc_fragile_enabled) {
__uaccess_begin();
- ret = copy_mc_fragile(dst, src, len);
+ ret = copy_mc_fragile((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
if (static_cpu_has(X86_FEATURE_ERMS)) {
__uaccess_begin();
- ret = copy_mc_enhanced_fast_string(dst, src, len);
+ ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
__uaccess_end();
return ret;
}
- return copy_user_generic(dst, src, len);
+ return copy_user_generic((__force void *)dst, src, len);
}
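
On the annotation change: __user marks the pointer's address space for
sparse, and __force documents the deliberate cast back to a plain pointer
once inside the __uaccess_begin()/__uaccess_end() window. A minimal
illustration (not from this patch):

	void __user *dst;
	void *a = (void *)dst;		/* sparse: cast removes address space */
	void *b = (__force void *)dst;	/* explicit and warning-free */
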
diff --git a/block/bdev.c b/block/bdev.c
index f3b13aa1b7d4..2018d250e131 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -829,6 +829,28 @@ put_blkdev:
}
EXPORT_SYMBOL(blkdev_get_by_dev);
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops)
+{
+ struct bdev_handle *handle = kmalloc(sizeof(*handle), GFP_KERNEL);
+ struct block_device *bdev;
+
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+ bdev = blkdev_get_by_dev(dev, mode, holder, hops);
+ if (IS_ERR(bdev)) {
+ kfree(handle);
+ return ERR_CAST(bdev);
+ }
+ handle->bdev = bdev;
+ handle->holder = holder;
+ if (holder)
+ mode |= BLK_OPEN_EXCL;
+ handle->mode = mode;
+ return handle;
+}
+EXPORT_SYMBOL(bdev_open_by_dev);
+
/**
* blkdev_get_by_path - open a block device by name
* @path: path to the block device to open
@@ -867,6 +889,28 @@ struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
}
EXPORT_SYMBOL(blkdev_get_by_path);
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops)
+{
+ struct bdev_handle *handle;
+ dev_t dev;
+ int error;
+
+ error = lookup_bdev(path, &dev);
+ if (error)
+ return ERR_PTR(error);
+
+ handle = bdev_open_by_dev(dev, mode, holder, hops);
+ if (!IS_ERR(handle) && (mode & BLK_OPEN_WRITE) &&
+ bdev_read_only(handle->bdev)) {
+ bdev_release(handle);
+ return ERR_PTR(-EACCES);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL(bdev_open_by_path);
+
void blkdev_put(struct block_device *bdev, void *holder)
{
struct gendisk *disk = bdev->bd_disk;
@@ -903,6 +947,13 @@ void blkdev_put(struct block_device *bdev, void *holder)
}
EXPORT_SYMBOL(blkdev_put);
+void bdev_release(struct bdev_handle *handle)
+{
+ blkdev_put(handle->bdev, handle->holder);
+ kfree(handle);
+}
+EXPORT_SYMBOL(bdev_release);
+
/**
* lookup_bdev() - Look up a struct block_device by name.
* @pathname: Name of the block device in the filesystem.
@@ -961,20 +1012,20 @@ void bdev_mark_dead(struct block_device *bdev, bool surprise)
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
bdev->bd_holder_ops->mark_dead(bdev, surprise);
- else
+ else {
+ mutex_unlock(&bdev->bd_holder_lock);
sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_holder_lock);
+ }
invalidate_bdev(bdev);
}
-#ifdef CONFIG_DASD_MODULE
/*
- * Drivers should not use this directly, but the DASD driver has historically
- * had a shutdown to offline mode that doesn't actually remove the gendisk
- * that otherwise looks a lot like a safe device removal.
+ * New drivers should not use this directly. There are some drivers however
+ * that need this for historical reasons. For example, the DASD driver has
+ * historically had a shutdown to offline mode that doesn't actually remove the
+ * gendisk that otherwise looks a lot like a safe device removal.
*/
EXPORT_SYMBOL_GPL(bdev_mark_dead);
-#endif
void sync_bdevs(bool wait)
{
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 38a881cf97d0..13e4377a8b28 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -723,6 +723,12 @@ static unsigned int calculate_io_allowed(u32 iops_limit,
static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
+ /*
+ * Can result be wider than 64 bits?
+ * We check against 62, not 64, due to ilog2 truncation.
+ */
+ if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62)
+ return U64_MAX;
return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
diff --git a/block/disk-events.c b/block/disk-events.c
index 13c3372c465a..2f697224386a 100644
--- a/block/disk-events.c
+++ b/block/disk-events.c
@@ -266,11 +266,8 @@ static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
* disk_check_media_change - check if a removable media has been changed
* @disk: gendisk to check
*
- * Check whether a removable media has been changed, and attempt to free all
- * dentries and inodes and invalidates all block device page cache entries in
- * that case.
- *
- * Returns %true if the media has changed, or %false if not.
+ * Returns %true and marks the disk for a partition rescan when a removable
+ * media change has been detected, and %false if the media did not change.
*/
bool disk_check_media_change(struct gendisk *disk)
{
@@ -278,12 +275,11 @@ bool disk_check_media_change(struct gendisk *disk)
events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
DISK_EVENT_EJECT_REQUEST);
- if (!(events & DISK_EVENT_MEDIA_CHANGE))
- return false;
-
- bdev_mark_dead(disk->part0, true);
- set_bit(GD_NEED_PART_SCAN, &disk->state);
- return true;
+ if (events & DISK_EVENT_MEDIA_CHANGE) {
+ set_bit(GD_NEED_PART_SCAN, &disk->state);
+ return true;
+ }
+ return false;
}
EXPORT_SYMBOL(disk_check_media_change);
diff --git a/block/fops.c b/block/fops.c
index 73e42742543f..0abaac705daf 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -542,15 +542,31 @@ static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
return error;
}
+/**
+ * file_to_blk_mode - get block open flags from file flags
+ * @file: file whose open flags should be converted
+ *
+ * Look at file open flags and generate corresponding block open flags from
+ * them. The function works both for a file that is just being opened (e.g.
+ * during the ->open callback) and for a file that is already open. This is
+ * actually non-trivial (see the comment in the function).
+ */
blk_mode_t file_to_blk_mode(struct file *file)
{
blk_mode_t mode = 0;
+ struct bdev_handle *handle = file->private_data;
if (file->f_mode & FMODE_READ)
mode |= BLK_OPEN_READ;
if (file->f_mode & FMODE_WRITE)
mode |= BLK_OPEN_WRITE;
- if (file->private_data)
+ /*
+ * do_dentry_open() clears O_EXCL from f_flags, use handle->mode to
+ * determine whether the open was exclusive for already open files.
+ */
+ if (handle)
+ mode |= handle->mode & BLK_OPEN_EXCL;
+ else if (file->f_flags & O_EXCL)
mode |= BLK_OPEN_EXCL;
if (file->f_flags & O_NDELAY)
mode |= BLK_OPEN_NDELAY;
@@ -568,7 +584,8 @@ blk_mode_t file_to_blk_mode(struct file *file)
static int blkdev_open(struct inode *inode, struct file *filp)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
+ blk_mode_t mode;
/*
* Preserve backwards compatibility and allow large file access
@@ -579,29 +596,24 @@ static int blkdev_open(struct inode *inode, struct file *filp)
filp->f_flags |= O_LARGEFILE;
filp->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
- /*
- * Use the file private data to store the holder for exclusive openes.
- * file_to_blk_mode relies on it being present to set BLK_OPEN_EXCL.
- */
- if (filp->f_flags & O_EXCL)
- filp->private_data = filp;
-
- bdev = blkdev_get_by_dev(inode->i_rdev, file_to_blk_mode(filp),
- filp->private_data, NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ mode = file_to_blk_mode(filp);
+ handle = bdev_open_by_dev(inode->i_rdev, mode,
+ mode & BLK_OPEN_EXCL ? filp : NULL, NULL);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- if (bdev_nowait(bdev))
+ if (bdev_nowait(handle->bdev))
filp->f_mode |= FMODE_NOWAIT;
- filp->f_mapping = bdev->bd_inode->i_mapping;
+ filp->f_mapping = handle->bdev->bd_inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
+ filp->private_data = handle;
return 0;
}
static int blkdev_release(struct inode *inode, struct file *filp)
{
- blkdev_put(I_BDEV(filp->f_mapping->host), filp->private_data);
+ bdev_release(filp->private_data);
return 0;
}
diff --git a/block/genhd.c b/block/genhd.c
index cc32a0c704eb..c9d06f72c587 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -342,7 +342,7 @@ EXPORT_SYMBOL_GPL(disk_uevent);
int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
int ret = 0;
if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
@@ -366,12 +366,12 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode)
}
set_bit(GD_NEED_PART_SCAN, &disk->state);
- bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
- NULL);
- if (IS_ERR(bdev))
- ret = PTR_ERR(bdev);
+ handle = bdev_open_by_dev(disk_devt(disk), mode & ~BLK_OPEN_EXCL, NULL,
+ NULL);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
else
- blkdev_put(bdev, NULL);
+ bdev_release(handle);
/*
* If blkdev_get_by_dev() failed early, GD_NEED_PART_SCAN is still set,
@@ -559,6 +559,13 @@ static void blk_report_disk_dead(struct gendisk *disk, bool surprise)
struct block_device *bdev;
unsigned long idx;
+ /*
+ * On surprise disk removal, bdev_mark_dead() may call into file
+ * systems below. Make it clear that we're expecting to not hold
+ * disk->open_mutex.
+ */
+ lockdep_assert_not_held(&disk->open_mutex);
+
rcu_read_lock();
xa_for_each(&disk->part_tbl, idx, bdev) {
if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
diff --git a/block/ioctl.c b/block/ioctl.c
index d5f5cd61efd7..4160f4e6bd5b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -370,9 +370,10 @@ static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
mutex_lock(&bdev->bd_holder_lock);
if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
bdev->bd_holder_ops->sync(bdev);
- else
+ else {
+ mutex_unlock(&bdev->bd_holder_lock);
sync_blockdev(bdev);
- mutex_unlock(&bdev->bd_holder_lock);
+ }
invalidate_bdev(bdev);
return 0;
@@ -467,6 +468,7 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
int __user *argp)
{
int ret, n;
+ struct bdev_handle *handle;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -478,10 +480,11 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
if (mode & BLK_OPEN_EXCL)
return set_blocksize(bdev, n);
- if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode, &bdev, NULL)))
+ handle = bdev_open_by_dev(bdev->bd_dev, mode, &bdev, NULL);
+ if (IS_ERR(handle))
return -EBUSY;
ret = set_blocksize(bdev, n);
- blkdev_put(bdev, &bdev);
+ bdev_release(handle);
return ret;
}
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e137a87f4db0..f47ffcfdfcec 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -274,17 +274,6 @@ void drop_partition(struct block_device *part)
put_device(&part->bd_device);
}
-static void delete_partition(struct block_device *part)
-{
- /*
- * Remove the block device from the inode hash, so that it cannot be
- * looked up any more even when openers still hold references.
- */
- remove_inode_hash(part->bd_inode);
- bdev_mark_dead(part, false);
- drop_partition(part);
-}
-
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -485,7 +474,18 @@ int bdev_del_partition(struct gendisk *disk, int partno)
if (atomic_read(&part->bd_openers))
goto out_unlock;
- delete_partition(part);
+ /*
+ * We verified that @part->bd_openers is zero above and so
+ * @part->bd_holder{_ops} can't be set. And since we hold
+ * @disk->open_mutex the device can't be claimed by anyone.
+ *
+ * So no need to call @part->bd_holder_ops->mark_dead() here.
+ * Just delete the partition and invalidate it.
+ */
+
+ remove_inode_hash(part->bd_inode);
+ invalidate_bdev(part);
+ drop_partition(part);
ret = 0;
out_unlock:
mutex_unlock(&disk->open_mutex);
@@ -663,8 +663,23 @@ rescan:
sync_blockdev(disk->part0);
invalidate_bdev(disk->part0);
- xa_for_each_start(&disk->part_tbl, idx, part, 1)
- delete_partition(part);
+ xa_for_each_start(&disk->part_tbl, idx, part, 1) {
+ /*
+ * Remove the block device from the inode hash, so that
+ * it cannot be looked up any more even when openers
+ * still hold references.
+ */
+ remove_inode_hash(part->bd_inode);
+
+ /*
+ * If @disk->open_partitions isn't elevated but there's
+ * still an active holder of that block device things
+ * are broken.
+ */
+ WARN_ON_ONCE(atomic_read(&part->bd_openers));
+ invalidate_bdev(part);
+ drop_partition(part);
+ }
clear_bit(GD_NEED_PART_SCAN, &disk->state);
/*
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 6d7f25d1711b..04f38a3f5d95 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -2888,12 +2888,11 @@ static int opal_lock_unlock(struct opal_dev *dev,
if (lk_unlk->session.who > OPAL_USER9)
return -EINVAL;
- ret = opal_get_key(dev, &lk_unlk->session.opal_key);
- if (ret)
- return ret;
mutex_lock(&dev->dev_lock);
opal_lock_check_for_saved_key(dev, lk_unlk);
- ret = __opal_lock_unlock(dev, lk_unlk);
+ ret = opal_get_key(dev, &lk_unlk->session.opal_key);
+ if (!ret)
+ ret = __opal_lock_unlock(dev, lk_unlk);
mutex_unlock(&dev->dev_lock);
return ret;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index abeecb8329b3..1dcab27986a6 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -81,14 +81,13 @@ software_key_determine_akcipher(const struct public_key *pkey,
* RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
*/
if (strcmp(encoding, "pkcs1") == 0) {
+ *sig = op == kernel_pkey_sign ||
+ op == kernel_pkey_verify;
if (!hash_algo) {
- *sig = false;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
} else {
- *sig = op == kernel_pkey_sign ||
- op == kernel_pkey_verify;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)",
pkey->pkey_algo, hash_algo);
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 467a60235370..7e9359611d69 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -367,14 +367,19 @@ int ivpu_boot(struct ivpu_device *vdev)
return 0;
}
-int ivpu_shutdown(struct ivpu_device *vdev)
+void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
- int ret;
-
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_hw_power_down(vdev);
if (ret)
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 03b3d6532fb6..2adc349126bb 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -151,6 +151,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
+void ivpu_prepare_for_reset(struct ivpu_device *vdev);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 0191cf8e5964..a277bbae78fc 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -220,8 +220,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
- DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
@@ -331,7 +330,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
memset(start, 0, size);
}
- clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
+ wmb(); /* Flush WC buffers after writing fw->mem */
return 0;
}
@@ -433,7 +432,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
- clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ wmb(); /* Flush WC buffers after writing save_restore_ret_address */
return;
}
@@ -495,7 +494,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
- clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ wmb(); /* Flush WC buffers after writing bootparams */
ivpu_fw_boot_params_print(vdev, boot_params);
}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index f4130586ff1b..6b0ceda5f253 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -8,8 +8,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_mm.h>
-#define DRM_IVPU_BO_NOSNOOP 0x10000000
-
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
@@ -85,9 +83,6 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
- if (bo->flags & DRM_IVPU_BO_NOSNOOP)
- return false;
-
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index ab341237bcf9..1079e06255ba 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -13,6 +13,7 @@ struct ivpu_hw_ops {
int (*power_up)(struct ivpu_device *vdev);
int (*boot_fw)(struct ivpu_device *vdev);
int (*power_down)(struct ivpu_device *vdev);
+ int (*reset)(struct ivpu_device *vdev);
bool (*is_idle)(struct ivpu_device *vdev);
void (*wdt_disable)(struct ivpu_device *vdev);
void (*diagnose_failure)(struct ivpu_device *vdev);
@@ -91,6 +92,13 @@ static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
return vdev->hw->ops->power_down(vdev);
};
+static inline int ivpu_hw_reset(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW reset\n");
+
+ return vdev->hw->ops->reset(vdev);
+};
+
static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
{
vdev->hw->ops->wdt_disable(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9eae1c241bc0..18be8b98e9a8 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -940,9 +940,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
if (status == 0)
return 0;
- /* Disable global interrupt before handling local buttress interrupts */
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
-
if (REG_TEST_FLD(VPU_37XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
REGB_RD32(VPU_37XX_BUTTRESS_CURRENT_PLL));
@@ -974,9 +971,6 @@ static u32 ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
else
REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
- /* Re-enable global interrupt */
- REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
-
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@@ -988,9 +982,14 @@ static irqreturn_t ivpu_hw_37xx_irq_handler(int irq, void *ptr)
struct ivpu_device *vdev = ptr;
u32 ret_irqv, ret_irqb;
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
ret_irqv = ivpu_hw_37xx_irqv_handler(vdev, irq);
ret_irqb = ivpu_hw_37xx_irqb_handler(vdev, irq);
+ /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+ REGB_WR32(VPU_37XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
return IRQ_RETVAL(ret_irqb | ret_irqv);
}
@@ -1029,6 +1028,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
.power_down = ivpu_hw_37xx_power_down,
+ .reset = ivpu_hw_37xx_reset,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 8bdb59a45da6..85171a408363 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -1179,6 +1179,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.power_up = ivpu_hw_40xx_power_up,
.is_idle = ivpu_hw_40xx_is_idle,
.power_down = ivpu_hw_40xx_power_down,
+ .reset = ivpu_hw_40xx_reset,
.boot_fw = ivpu_hw_40xx_boot_fw,
.wdt_disable = ivpu_hw_40xx_wdt_disable,
.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 1d2e554e2c4a..ce94f4029127 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -11,6 +11,7 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+#define IVPU_MMU_VPU_ADDRESS_MASK GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
@@ -328,12 +329,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
return -EINVAL;
- /*
- * VPU is only 32 bit, but DMA engine is 38 bit
- * Ranges < 2 GB are reserved for VPU internal registers
- * Limit range to 8 GB
- */
- if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+
+ if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
return -EINVAL;
prot = IVPU_MMU_ENTRY_MAPPED;
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index e6f27daf5560..ffff2496e8e8 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -261,7 +261,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1);
- ivpu_shutdown(vdev);
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index f41dda2d3493..a4aa53b7e2bb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1410,10 +1410,10 @@ static int __init acpi_init(void)
acpi_init_ffh();
pci_mmcfg_late_init();
- acpi_arm_init();
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
+ acpi_arm_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index c2c786eb95ab..1687483ff319 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
struct irq_fwspec fwspec;
+ unsigned int irq;
fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
if (WARN_ON(!fwspec.fwnode)) {
@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
- return irq_create_fwspec_mapping(&fwspec);
+ irq = irq_create_fwspec_mapping(&fwspec);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f96bf32cd368..7d88db451cfb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3339,6 +3339,16 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc = 0;
+ rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
+ acpi_nfit_notify);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
+ adev);
+ if (rc)
+ return rc;
+
status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
if (ACPI_FAILURE(status)) {
/* The NVDIMM root device allows OS to trigger enumeration of
@@ -3386,17 +3396,7 @@ static int acpi_nfit_add(struct acpi_device *adev)
if (rc)
return rc;
- rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
- if (rc)
- return rc;
-
- rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
- acpi_nfit_notify);
- if (rc)
- return rc;
-
- return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
- adev);
+ return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
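
The reorder leans on devres semantics: devm actions run in reverse (LIFO) registration order on teardown. With the notify handler installed and its removal action queued first, the acpi_nfit_shutdown action registered last is released first, so the driver is quiesced before the notify handler is torn down rather than the other way around. A toy illustration of the LIFO rule (all names hypothetical):

    #include <linux/device.h>
    #include <linux/printk.h>

    static void teardown_a(void *data) { pr_info("released A\n"); }
    static void teardown_b(void *data) { pr_info("released B\n"); }

    static int demo_probe(struct device *dev)
    {
    	int rc = devm_add_action_or_reset(dev, teardown_a, NULL);
    	if (rc)
    		return rc;
    	/* On detach: "released B" prints before "released A". */
    	return devm_add_action_or_reset(dev, teardown_b, NULL);
    }
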
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 81effec17b3d..420dc9cbf774 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -152,7 +152,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
goto err;
inode->i_ino = minor + INODE_OFFSET;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR | 0600,
MKDEV(MAJOR(binderfs_dev), minor));
inode->i_fop = &binder_fops;
@@ -431,7 +431,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
}
inode->i_ino = SECOND_INODE;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR | 0600,
MKDEV(MAJOR(binderfs_dev), minor));
inode->i_fop = &binder_ctl_fops;
@@ -473,7 +473,7 @@ static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
}
return ret;
}
@@ -702,7 +702,7 @@ static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
inode->i_ino = FIRST_INODE;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | 0755;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &binderfs_dir_inode_operations;
set_nlink(inode, 2);
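
All four hunks in this file swap the open-coded "atime = mtime = ctime = now" chain for the new VFS helper. Its body is roughly the following (a sketch of the fs/libfs.c helper added this cycle): set ctime to the current time and copy it to atime and mtime.

    struct timespec64 simple_inode_init_ts(struct inode *inode)
    {
    	struct timespec64 ts = inode_set_ctime_current(inode);

    	inode_set_atime_to_ts(inode, ts);
    	inode_set_mtime_to_ts(inode, ts);
    	return ts;
    }
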
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a371b497035e..3a957c4da409 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1053,10 +1053,11 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
/*
* Ask the sd driver to issue START STOP UNIT on runtime suspend
- * and resume only. For system level suspend/resume, devices
- * power state is handled directly by libata EH.
+ * and resume, and on shutdown. For system level suspend/resume, the
+ * device's power state is handled directly by libata EH.
*/
sdev->manage_runtime_start_stop = true;
+ sdev->manage_shutdown = true;
}
/*
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 884cb51c8f67..234a84ecde8b 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1478,7 +1478,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
- return !strcmp((*r)->name, data);
+ return (*r)->name && !strcmp((*r)->name, data);
else
return 1;
}
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index cd738cab725f..50949207798d 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1760,8 +1760,10 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
/* invalidate the buffer track to force a reread */
BufferDrive = -1;
set_bit(drive, &fake_change);
- if (disk_check_media_change(disk))
+ if (disk_check_media_change(disk)) {
+ bdev_mark_dead(disk->part0, true);
floppy_revalidate(disk);
+ }
return 0;
default:
return -EINVAL;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index a30a5ed811be..f017e917612b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -524,7 +524,9 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
+ struct bdev_handle *backing_bdev_handle;
struct block_device *md_bdev;
+ struct bdev_handle *md_bdev_handle;
struct drbd_md md;
struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
sector_t known_size; /* last known size of that backing device */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index d3538bd83fb3..43747a1aae43 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -82,7 +82,7 @@ static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
DEFINE_MUTEX(notification_mutex);
-/* used blkdev_get_by_path, to claim our meta data device(s) */
+/* used with bdev_open_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
@@ -1635,43 +1635,45 @@ success:
return 0;
}
-static struct block_device *open_backing_dev(struct drbd_device *device,
+static struct bdev_handle *open_backing_dev(struct drbd_device *device,
const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
int err = 0;
- bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
- claim_ptr, NULL);
- if (IS_ERR(bdev)) {
+ handle = bdev_open_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ claim_ptr, NULL);
+ if (IS_ERR(handle)) {
drbd_err(device, "open(\"%s\") failed with %ld\n",
- bdev_path, PTR_ERR(bdev));
- return bdev;
+ bdev_path, PTR_ERR(handle));
+ return handle;
}
if (!do_bd_link)
- return bdev;
+ return handle;
- err = bd_link_disk_holder(bdev, device->vdisk);
+ err = bd_link_disk_holder(handle->bdev, device->vdisk);
if (err) {
- blkdev_put(bdev, claim_ptr);
+ bdev_release(handle);
drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
bdev_path, err);
- bdev = ERR_PTR(err);
+ handle = ERR_PTR(err);
}
- return bdev;
+ return handle;
}
static int open_backing_devices(struct drbd_device *device,
struct disk_conf *new_disk_conf,
struct drbd_backing_dev *nbc)
{
- struct block_device *bdev;
+ struct bdev_handle *handle;
- bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
- if (IS_ERR(bdev))
+ handle = open_backing_dev(device, new_disk_conf->backing_dev, device,
+ true);
+ if (IS_ERR(handle))
return ERR_OPEN_DISK;
- nbc->backing_bdev = bdev;
+ nbc->backing_bdev = handle->bdev;
+ nbc->backing_bdev_handle = handle;
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
@@ -1681,7 +1683,7 @@ static int open_backing_devices(struct drbd_device *device,
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
- bdev = open_backing_dev(device, new_disk_conf->meta_dev,
+ handle = open_backing_dev(device, new_disk_conf->meta_dev,
/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
* if potentially shared with other drbd minors */
(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
@@ -1689,20 +1691,21 @@ static int open_backing_devices(struct drbd_device *device,
* as would happen with internal metadata. */
(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
- if (IS_ERR(bdev))
+ if (IS_ERR(handle))
return ERR_OPEN_MD_DISK;
- nbc->md_bdev = bdev;
+ nbc->md_bdev = handle->bdev;
+ nbc->md_bdev_handle = handle;
return NO_ERROR;
}
-static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
- void *claim_ptr, bool do_bd_unlink)
+static void close_backing_dev(struct drbd_device *device,
+ struct bdev_handle *handle, bool do_bd_unlink)
{
- if (!bdev)
+ if (!handle)
return;
if (do_bd_unlink)
- bd_unlink_disk_holder(bdev, device->vdisk);
- blkdev_put(bdev, claim_ptr);
+ bd_unlink_disk_holder(handle->bdev, device->vdisk);
+ bdev_release(handle);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
@@ -1710,11 +1713,9 @@ void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *
if (ldev == NULL)
return;
- close_backing_dev(device, ldev->md_bdev,
- ldev->md.meta_dev_idx < 0 ?
- (void *)device : (void *)drbd_m_holder,
+ close_backing_dev(device, ldev->md_bdev_handle,
ldev->md_bdev != ldev->backing_bdev);
- close_backing_dev(device, ldev->backing_bdev, device, true);
+ close_backing_dev(device, ldev->backing_bdev_handle, true);
kfree(ldev->disk_conf);
kfree(ldev);
@@ -2130,11 +2131,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
fail:
conn_reconfig_done(connection);
if (nbc) {
- close_backing_dev(device, nbc->md_bdev,
- nbc->disk_conf->meta_dev_idx < 0 ?
- (void *)device : (void *)drbd_m_holder,
+ close_backing_dev(device, nbc->md_bdev_handle,
nbc->md_bdev != nbc->backing_bdev);
- close_backing_dev(device, nbc->backing_bdev, device, true);
+ close_backing_dev(device, nbc->backing_bdev_handle, true);
kfree(nbc);
}
kfree(new_disk_conf);
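
This drbd conversion is the template for the pktcdvd, rnbd, xen-blkback and zram hunks that follow: blkdev_get_by_{path,dev}() and blkdev_put() become bdev_open_by_{path,dev}() and bdev_release(), and drivers keep the returned struct bdev_handle instead of (or alongside) the raw block_device. Because the handle records the holder, release sites drop their claim_ptr argument. The pairing, sketched (path and holder are placeholders):

    struct bdev_handle *handle;

    handle = bdev_open_by_path("/dev/...", BLK_OPEN_READ | BLK_OPEN_WRITE,
    			   my_holder, NULL);
    if (IS_ERR(handle))
    	return PTR_ERR(handle);

    submit_io(handle->bdev);	/* I/O still targets the block_device */

    bdev_release(handle);	/* no holder argument, unlike blkdev_put() */
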
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index ea4eb88a2e45..11114a5d9e5c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3215,8 +3215,10 @@ static int invalidate_drive(struct gendisk *disk)
/* invalidate the buffer track to force a reread */
set_bit((long)disk->private_data, &fake_change);
process_fd_request();
- if (disk_check_media_change(disk))
+ if (disk_check_media_change(disk)) {
+ bdev_mark_dead(disk->part0, true);
floppy_revalidate(disk);
+ }
return 0;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a1428538bda5..d56d972aadb3 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -340,8 +340,8 @@ static ssize_t device_map_show(const struct class *c, const struct class_attribu
n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
pd->disk->disk_name,
MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(pd->bdev->bd_dev),
- MINOR(pd->bdev->bd_dev));
+ MAJOR(pd->bdev_handle->bdev->bd_dev),
+ MINOR(pd->bdev_handle->bdev->bd_dev));
}
mutex_unlock(&ctl_mutex);
return n;
@@ -437,7 +437,8 @@ static int pkt_seq_show(struct seq_file *m, void *p)
char *msg;
int states[PACKET_NUM_STATES];
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name, pd->bdev);
+ seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
+ pd->bdev_handle->bdev);
seq_printf(m, "\nSettings:\n");
seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
@@ -714,7 +715,7 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
- struct request_queue *q = bdev_get_queue(pd->bdev);
+ struct request_queue *q = bdev_get_queue(pd->bdev_handle->bdev);
struct scsi_cmnd *scmd;
struct request *rq;
int ret = 0;
@@ -1047,7 +1048,8 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
- bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
+ bio_init(bio, pd->bdev_handle->bdev, bio->bi_inline_vecs, 1,
+ REQ_OP_READ);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1262,8 +1264,8 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
struct device *ddev = disk_to_dev(pd->disk);
int f;
- bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
- REQ_OP_WRITE);
+ bio_init(pkt->w_bio, pd->bdev_handle->bdev, pkt->w_bio->bi_inline_vecs,
+ pkt->frames, REQ_OP_WRITE);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2160,18 +2162,20 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
int ret;
long lba;
struct request_queue *q;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
* to read/write from/to it. It is already opened in O_NONBLOCK mode
* so open should not fail.
*/
- bdev = blkdev_get_by_dev(pd->bdev->bd_dev, BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_dev(pd->bdev_handle->bdev->bd_dev,
+ BLK_OPEN_READ, pd, NULL);
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
goto out;
}
+ pd->open_bdev_handle = bdev_handle;
ret = pkt_get_last_written(pd, &lba);
if (ret) {
@@ -2180,9 +2184,9 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
}
set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
+ set_capacity_and_notify(pd->bdev_handle->bdev->bd_disk, lba << 2);
- q = bdev_get_queue(pd->bdev);
+ q = bdev_get_queue(pd->bdev_handle->bdev);
if (write) {
ret = pkt_open_write(pd);
if (ret)
@@ -2214,7 +2218,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
return 0;
out_putdev:
- blkdev_put(bdev, pd);
+ bdev_release(bdev_handle);
out:
return ret;
}
@@ -2233,7 +2237,8 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- blkdev_put(pd->bdev, pd);
+ bdev_release(pd->open_bdev_handle);
+ pd->open_bdev_handle = NULL;
pkt_shrink_pktlist(pd);
}
@@ -2321,8 +2326,8 @@ static void pkt_end_io_read_cloned(struct bio *bio)
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
- struct bio *cloned_bio =
- bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
+ struct bio *cloned_bio = bio_alloc_clone(pd->bdev_handle->bdev, bio,
+ GFP_NOIO, &pkt_bio_set);
struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
psd->pd = pd;
@@ -2492,7 +2497,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
struct device *ddev = disk_to_dev(pd->disk);
int i;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
@@ -2503,8 +2508,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
struct pktcdvd_device *pd2 = pkt_devs[i];
if (!pd2)
continue;
- if (pd2->bdev->bd_dev == dev) {
- dev_err(ddev, "%pg already setup\n", pd2->bdev);
+ if (pd2->bdev_handle->bdev->bd_dev == dev) {
+ dev_err(ddev, "%pg already setup\n",
+ pd2->bdev_handle->bdev);
return -EBUSY;
}
if (pd2->pkt_dev == dev) {
@@ -2513,13 +2519,13 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
}
- bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY, NULL,
- NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
+ sdev = scsi_device_from_queue(bdev_handle->bdev->bd_disk->queue);
if (!sdev) {
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
return -EINVAL;
}
put_device(&sdev->sdev_gendev);
@@ -2527,8 +2533,8 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
- pd->bdev = bdev;
- set_blocksize(bdev, CD_FRAMESIZE);
+ pd->bdev_handle = bdev_handle;
+ set_blocksize(bdev_handle->bdev, CD_FRAMESIZE);
pkt_init_queue(pd);
@@ -2540,11 +2546,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
}
proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", bdev);
+ dev_notice(ddev, "writer mapped to %pg\n", bdev_handle->bdev);
return 0;
out_mem:
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return -ENOMEM;
@@ -2599,9 +2605,9 @@ static unsigned int pkt_check_events(struct gendisk *disk,
if (!pd)
return 0;
- if (!pd->bdev)
+ if (!pd->bdev_handle)
return 0;
- attached_disk = pd->bdev->bd_disk;
+ attached_disk = pd->bdev_handle->bdev->bd_disk;
if (!attached_disk || !attached_disk->fops->check_events)
return 0;
return attached_disk->fops->check_events(attached_disk, clearing);
@@ -2686,7 +2692,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
goto out_mem2;
/* inherit events of the host device */
- disk->events = pd->bdev->bd_disk->events;
+ disk->events = pd->bdev_handle->bdev->bd_disk->events;
ret = add_disk(disk);
if (ret)
@@ -2751,7 +2757,7 @@ static int pkt_remove_dev(dev_t pkt_dev)
pkt_debugfs_dev_remove(pd);
pkt_sysfs_dev_remove(pd);
- blkdev_put(pd->bdev, NULL);
+ bdev_release(pd->bdev_handle);
remove_proc_entry(pd->disk->disk_name, pkt_proc);
dev_notice(ddev, "writer unmapped\n");
@@ -2778,7 +2784,7 @@ static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
if (pd) {
- ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+ ctrl_cmd->dev = new_encode_dev(pd->bdev_handle->bdev->bd_dev);
ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
} else {
ctrl_cmd->dev = 0;
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index c186df0ec641..65de51f3dfd9 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -145,7 +145,7 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
priv->sess_dev = sess_dev;
priv->id = id;
- bio = bio_alloc(sess_dev->bdev, 1,
+ bio = bio_alloc(sess_dev->bdev_handle->bdev, 1,
rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
if (bio_add_page(bio, virt_to_page(data), datalen,
offset_in_page(data)) != datalen) {
@@ -219,7 +219,7 @@ void rnbd_destroy_sess_dev(struct rnbd_srv_sess_dev *sess_dev, bool keep_id)
rnbd_put_sess_dev(sess_dev);
wait_for_completion(&dc); /* wait for inflights to drop to zero */
- blkdev_put(sess_dev->bdev, NULL);
+ bdev_release(sess_dev->bdev_handle);
mutex_lock(&sess_dev->dev->lock);
list_del(&sess_dev->dev_list);
if (!sess_dev->readonly)
@@ -534,7 +534,7 @@ rnbd_srv_get_or_create_srv_dev(struct block_device *bdev,
static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
- struct block_device *bdev = sess_dev->bdev;
+ struct block_device *bdev = sess_dev->bdev_handle->bdev;
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = cpu_to_le32(sess_dev->device_id);
@@ -559,7 +559,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
static struct rnbd_srv_sess_dev *
rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
const struct rnbd_msg_open *open_msg,
- struct block_device *bdev, bool readonly,
+ struct bdev_handle *handle, bool readonly,
struct rnbd_srv_dev *srv_dev)
{
struct rnbd_srv_sess_dev *sdev = rnbd_sess_dev_alloc(srv_sess);
@@ -571,7 +571,7 @@ rnbd_srv_create_set_sess_dev(struct rnbd_srv_session *srv_sess,
strscpy(sdev->pathname, open_msg->dev_name, sizeof(sdev->pathname));
- sdev->bdev = bdev;
+ sdev->bdev_handle = handle;
sdev->sess = srv_sess;
sdev->dev = srv_dev;
sdev->readonly = readonly;
@@ -676,7 +676,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
struct rnbd_srv_dev *srv_dev;
struct rnbd_srv_sess_dev *srv_sess_dev;
const struct rnbd_msg_open *open_msg = msg;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
blk_mode_t open_flags = BLK_OPEN_READ;
char *full_path;
struct rnbd_msg_open_rsp *rsp = data;
@@ -714,15 +714,15 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto reject;
}
- bdev = blkdev_get_by_path(full_path, open_flags, NULL, NULL);
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_path(full_path, open_flags, NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
pr_err("Opening device '%s' on session %s failed, failed to open the block device, err: %d\n",
full_path, srv_sess->sessname, ret);
goto free_path;
}
- srv_dev = rnbd_srv_get_or_create_srv_dev(bdev, srv_sess,
+ srv_dev = rnbd_srv_get_or_create_srv_dev(bdev_handle->bdev, srv_sess,
open_msg->access_mode);
if (IS_ERR(srv_dev)) {
pr_err("Opening device '%s' on session %s failed, creating srv_dev failed, err: %ld\n",
@@ -731,7 +731,8 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
goto blkdev_put;
}
- srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg, bdev,
+ srv_sess_dev = rnbd_srv_create_set_sess_dev(srv_sess, open_msg,
+ bdev_handle,
open_msg->access_mode == RNBD_ACCESS_RO,
srv_dev);
if (IS_ERR(srv_sess_dev)) {
@@ -747,7 +748,7 @@ static int process_msg_open(struct rnbd_srv_session *srv_sess,
*/
mutex_lock(&srv_dev->lock);
if (!srv_dev->dev_kobj.state_in_sysfs) {
- ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev);
+ ret = rnbd_srv_create_dev_sysfs(srv_dev, bdev_handle->bdev);
if (ret) {
mutex_unlock(&srv_dev->lock);
rnbd_srv_err(srv_sess_dev,
@@ -790,7 +791,7 @@ srv_dev_put:
}
rnbd_put_srv_dev(srv_dev);
blkdev_put:
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
free_path:
kfree(full_path);
reject:
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 1027656dedb0..343cc682b617 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -46,7 +46,7 @@ struct rnbd_srv_dev {
struct rnbd_srv_sess_dev {
/* Entry inside rnbd_srv_dev struct */
struct list_head dev_list;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct rnbd_srv_session *sess;
struct rnbd_srv_dev *dev;
struct kobject kobj;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c362f4ad80ab..4defd7f387c7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -465,7 +465,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
}
req->dev = vbd->pdevice;
- req->bdev = vbd->bdev;
+ req->bdev = vbd->bdev_handle->bdev;
rc = 0;
out:
@@ -969,7 +969,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
int err = 0;
int status = BLKIF_RSP_OKAY;
struct xen_blkif *blkif = ring->blkif;
- struct block_device *bdev = blkif->vbd.bdev;
+ struct block_device *bdev = blkif->vbd.bdev_handle->bdev;
struct phys_req preq;
xen_blkif_get(blkif);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 40f67bfc052d..5ff50e76cee5 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -221,7 +221,7 @@ struct xen_vbd {
unsigned char type;
/* phys device that this vbd maps to. */
u32 pdevice;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
/* Cached size parameter. */
sector_t size;
unsigned int flush_support:1;
@@ -360,7 +360,7 @@ struct pending_req {
};
-#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev)
+#define vbd_sz(_v) bdev_nr_sectors((_v)->bdev_handle->bdev)
#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index bb66178c432b..e34219ea2b05 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -81,7 +81,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
int i;
/* Not ready to connect? */
- if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
+ if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle)
return;
/* Already connected? */
@@ -99,12 +99,13 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = sync_blockdev(blkif->vbd.bdev);
+ err = sync_blockdev(blkif->vbd.bdev_handle->bdev);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
}
- invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);
+ invalidate_inode_pages2(
+ blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping);
for (i = 0; i < blkif->nr_rings; i++) {
ring = &blkif->rings[i];
@@ -472,9 +473,9 @@ static void xenvbd_sysfs_delif(struct xenbus_device *dev)
static void xen_vbd_free(struct xen_vbd *vbd)
{
- if (vbd->bdev)
- blkdev_put(vbd->bdev, NULL);
- vbd->bdev = NULL;
+ if (vbd->bdev_handle)
+ bdev_release(vbd->bdev_handle);
+ vbd->bdev_handle = NULL;
}
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
@@ -482,7 +483,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
int cdrom)
{
struct xen_vbd *vbd;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
vbd = &blkif->vbd;
vbd->handle = handle;
@@ -491,17 +492,17 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->pdevice = MKDEV(major, minor);
- bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
+ bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ?
BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(bdev)) {
+ if (IS_ERR(bdev_handle)) {
pr_warn("xen_vbd_create: device %08x could not be opened\n",
vbd->pdevice);
return -ENOENT;
}
- vbd->bdev = bdev;
- if (vbd->bdev->bd_disk == NULL) {
+ vbd->bdev_handle = bdev_handle;
+ if (vbd->bdev_handle->bdev->bd_disk == NULL) {
pr_warn("xen_vbd_create: device %08x doesn't exist\n",
vbd->pdevice);
xen_vbd_free(vbd);
@@ -509,14 +510,14 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
- if (cdrom || disk_to_cdi(vbd->bdev->bd_disk))
+ if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk))
vbd->type |= VDISK_CDROM;
- if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
+ if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
- if (bdev_write_cache(bdev))
+ if (bdev_write_cache(bdev_handle->bdev))
vbd->flush_support = true;
- if (bdev_max_secure_erase_sectors(bdev))
+ if (bdev_max_secure_erase_sectors(bdev_handle->bdev))
vbd->discard_secure = true;
pr_debug("Successful creation of handle=%04x (dom=%u)\n",
@@ -569,7 +570,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
struct xen_blkif *blkif = be->blkif;
int err;
int state = 0;
- struct block_device *bdev = be->blkif->vbd.bdev;
+ struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev;
if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
return;
@@ -930,15 +931,16 @@ again:
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
- (unsigned long)
- bdev_logical_block_size(be->blkif->vbd.bdev));
+ (unsigned long)bdev_logical_block_size(
+ be->blkif->vbd.bdev_handle->bdev));
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/sector-size",
dev->nodename);
goto abort;
}
err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
- bdev_physical_block_size(be->blkif->vbd.bdev));
+ bdev_physical_block_size(
+ be->blkif->vbd.bdev_handle->bdev));
if (err)
xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
dev->nodename);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 06673c6ca255..d77d3664ca08 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -414,17 +414,14 @@ static ssize_t writeback_limit_show(struct device *dev,
static void reset_bdev(struct zram *zram)
{
- struct block_device *bdev;
-
if (!zram->backing_dev)
return;
- bdev = zram->bdev;
- blkdev_put(bdev, zram);
+ bdev_release(zram->bdev_handle);
/* hope filp_close flush all of IO */
filp_close(zram->backing_dev, NULL);
zram->backing_dev = NULL;
- zram->bdev = NULL;
+ zram->bdev_handle = NULL;
zram->disk->fops = &zram_devops;
kvfree(zram->bitmap);
zram->bitmap = NULL;
@@ -470,7 +467,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct address_space *mapping;
unsigned int bitmap_sz;
unsigned long nr_pages, *bitmap = NULL;
- struct block_device *bdev = NULL;
+ struct bdev_handle *bdev_handle = NULL;
int err;
struct zram *zram = dev_to_zram(dev);
@@ -507,11 +504,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
- bdev = blkdev_get_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- zram, NULL);
- if (IS_ERR(bdev)) {
- err = PTR_ERR(bdev);
- bdev = NULL;
+ bdev_handle = bdev_open_by_dev(inode->i_rdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
+ if (IS_ERR(bdev_handle)) {
+ err = PTR_ERR(bdev_handle);
+ bdev_handle = NULL;
goto out;
}
@@ -525,7 +522,7 @@ static ssize_t backing_dev_store(struct device *dev,
reset_bdev(zram);
- zram->bdev = bdev;
+ zram->bdev_handle = bdev_handle;
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
@@ -538,8 +535,8 @@ static ssize_t backing_dev_store(struct device *dev,
out:
kvfree(bitmap);
- if (bdev)
- blkdev_put(bdev, zram);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
if (backing_dev)
filp_close(backing_dev, NULL);
@@ -581,7 +578,7 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
{
struct bio *bio;
- bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
+ bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
__bio_add_page(bio, page, PAGE_SIZE, 0);
bio_chain(bio, parent);
@@ -697,7 +694,7 @@ static ssize_t writeback_store(struct device *dev,
continue;
}
- bio_init(&bio, zram->bdev, &bio_vec, 1,
+ bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
__bio_add_page(&bio, page, PAGE_SIZE, 0);
@@ -779,7 +776,7 @@ static void zram_sync_read(struct work_struct *work)
struct bio_vec bv;
struct bio bio;
- bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
+ bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
zw->error = submit_bio_wait(&bio);
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index ca7a15bd4845..d090753f97be 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -132,7 +132,7 @@ struct zram {
spinlock_t wb_limit_lock;
bool wb_limit_enable;
u64 bd_wb_limit;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
unsigned long *bitmap;
unsigned long nr_pages;
#endif
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 84c2c2e1122f..277d039ecbb4 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -962,13 +962,10 @@ static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
skb_put_data(skb, buf, strlen(buf));
}
-static int btrtl_register_devcoredump_support(struct hci_dev *hdev)
+static void btrtl_register_devcoredump_support(struct hci_dev *hdev)
{
- int err;
+ hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
- err = hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
-
- return err;
}
void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name)
@@ -1255,8 +1252,7 @@ int btrtl_download_firmware(struct hci_dev *hdev,
}
done:
- if (!err)
- err = btrtl_register_devcoredump_support(hdev);
+ btrtl_register_devcoredump_support(hdev);
return err;
}
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 40e2b9fa11a2..f3892e9ce800 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
struct vhci_data *data = hci_get_drvdata(hdev);
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+ mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
+ mutex_unlock(&data->open_mutex);
wake_up_interruptible(&data->read_wait);
return 0;
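
skb queues carry their own spinlock, so skb_queue_tail() alone is list-safe; the added open_mutex protects a compound invariant — the queue's relationship to the device's open state, whose setup and teardown paths presumably flush readq under the same mutex. Generic shape of that pattern (hypothetical state structure):

    /* Queue only while the state says it is legal to do so; the state
     * changer takes the same mutex before flushing the queue. */
    mutex_lock(&st->open_mutex);
    if (st->ready)
    	skb_queue_tail(&st->readq, skb);
    mutex_unlock(&st->open_mutex);
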
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index a57677f908f3..d6e5e3abaad8 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -3,7 +3,7 @@ menu "Cache Drivers"
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV_DMA_NONCOHERENT
+ depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 9211531689b2..22d249333f53 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -920,7 +920,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
}
return ret;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c249f9791ae8..473563bc7496 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3416,6 +3416,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
unsigned int i, char terminator)
{
struct clk_core *parent;
+ const char *name = NULL;
/*
* Go through the following options to fetch a parent's name.
@@ -3430,18 +3431,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
* registered (yet).
*/
parent = clk_core_get_parent_by_index(core, i);
- if (parent)
+ if (parent) {
seq_puts(s, parent->name);
- else if (core->parents[i].name)
+ } else if (core->parents[i].name) {
seq_puts(s, core->parents[i].name);
- else if (core->parents[i].fw_name)
+ } else if (core->parents[i].fw_name) {
seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
- else if (core->parents[i].index >= 0)
- seq_puts(s,
- of_clk_get_parent_name(core->of_node,
- core->parents[i].index));
- else
- seq_puts(s, "(missing)");
+ } else {
+ if (core->parents[i].index >= 0)
+ name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
+ if (!name)
+ name = "(missing)";
+
+ seq_puts(s, name);
+ }
seq_putc(s, terminator);
}
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 8dd601bd8538..0a5a95e0267f 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -87,10 +87,8 @@ static int socfpga_clk_set_parent(struct clk_hw *hwclk, u8 parent)
return 0;
}
-static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
- unsigned long parent_rate)
+static u32 socfpga_clk_get_div(struct socfpga_gate_clk *socfpgaclk)
{
- struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
u32 div = 1, val;
if (socfpgaclk->fixed_div)
@@ -105,12 +103,33 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
div = (1 << val);
}
+ return div;
+}
+
+static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = socfpga_clk_get_div(socfpgaclk);
+
return parent_rate / div;
}
+
+static int socfpga_clk_determine_rate(struct clk_hw *hwclk,
+ struct clk_rate_request *req)
+{
+ struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk);
+ u32 div = socfpga_clk_get_div(socfpgaclk);
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
+}
+
static struct clk_ops gateclk_ops = {
.recalc_rate = socfpga_clk_recalc_rate,
- .determine_rate = clk_hw_determine_rate_no_reparent,
+ .determine_rate = socfpga_clk_determine_rate,
.get_parent = socfpga_clk_get_parent,
.set_parent = socfpga_clk_set_parent,
};
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index d5aa09e9fce4..067b918a8894 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -431,7 +431,7 @@ static int clk_stm32_composite_determine_rate(struct clk_hw *hw,
{
struct clk_stm32_composite *composite = to_clk_stm32_composite(hw);
const struct stm32_div_cfg *divider;
- unsigned long rate;
+ long rate;
if (composite->div_id == NO_STM32_DIV)
return 0;
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 868bc7af21b0..9b2824ed785b 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -749,9 +749,14 @@ static struct ti_dt_clk omap44xx_clks[] = {
DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
+ DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
+ DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
DT_CLK(NULL, "mcbsp4_sync_mux_ck", "l4-per-clkctrl:00c0:26"),
+ DT_CLK("48096000.mcbsp", "prcm_fck", "l4-per-clkctrl:00c0:26"),
DT_CLK(NULL, "ocp2scp_usb_phy_phy_48m", "l3-init-clkctrl:00c0:8"),
DT_CLK(NULL, "otg_60m_gfclk", "l3-init-clkctrl:0040:24"),
+ DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
DT_CLK(NULL, "per_mcbsp4_gfclk", "l4-per-clkctrl:00c0:24"),
DT_CLK(NULL, "pmd_stm_clock_mux_ck", "emu-sys-clkctrl:0000:20"),
DT_CLK(NULL, "pmd_trace_clk_mux_ck", "emu-sys-clkctrl:0000:22"),
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index b4aff76eb373..74dfd5823f83 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -565,15 +565,19 @@ static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK(NULL, "gpio8_dbclk", "l4per-clkctrl:00f8:8"),
DT_CLK(NULL, "mcbsp1_gfclk", "abe-clkctrl:0028:24"),
DT_CLK(NULL, "mcbsp1_sync_mux_ck", "abe-clkctrl:0028:26"),
+ DT_CLK("40122000.mcbsp", "prcm_fck", "abe-clkctrl:0028:26"),
DT_CLK(NULL, "mcbsp2_gfclk", "abe-clkctrl:0030:24"),
DT_CLK(NULL, "mcbsp2_sync_mux_ck", "abe-clkctrl:0030:26"),
+ DT_CLK("40124000.mcbsp", "prcm_fck", "abe-clkctrl:0030:26"),
DT_CLK(NULL, "mcbsp3_gfclk", "abe-clkctrl:0038:24"),
DT_CLK(NULL, "mcbsp3_sync_mux_ck", "abe-clkctrl:0038:26"),
+ DT_CLK("40126000.mcbsp", "prcm_fck", "abe-clkctrl:0038:26"),
DT_CLK(NULL, "mmc1_32khz_clk", "l3init-clkctrl:0008:8"),
DT_CLK(NULL, "mmc1_fclk", "l3init-clkctrl:0008:25"),
DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
DT_CLK(NULL, "mmc2_fclk", "l3init-clkctrl:0010:25"),
DT_CLK(NULL, "mmc2_fclk_mux", "l3init-clkctrl:0010:24"),
+ DT_CLK(NULL, "pad_fck", "pad_clks_ck"),
DT_CLK(NULL, "sata_ref_clk", "l3init-clkctrl:0068:8"),
DT_CLK(NULL, "timer10_gfclk_mux", "l4per-clkctrl:0008:24"),
DT_CLK(NULL, "timer11_gfclk_mux", "l4per-clkctrl:0010:24"),
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 05d562e9c8b1..44b19e696176 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -54,7 +54,7 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
enum proc_cn_mcast_op mc_op;
uintptr_t val;
- if (!dsk || !data)
+ if (!dsk || !dsk->sk_user_data || !data)
return 0;
ptr = (__u32 *)data;
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 59a4c0259456..154590e1f764 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -35,6 +35,9 @@ struct virtio_crypto {
struct virtqueue *ctrl_vq;
struct data_queue *data_vq;
+ /* Work struct for config space updates */
+ struct work_struct config_work;
+
/* To protect the vq operations for the controlq */
spinlock_t ctrl_lock;
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 94849fa3bd74..43a0838d31ff 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
virtcrypto_free_queues(vcrypto);
}
+static void vcrypto_config_changed_work(struct work_struct *work)
+{
+ struct virtio_crypto *vcrypto =
+ container_of(work, struct virtio_crypto, config_work);
+
+ virtcrypto_update_status(vcrypto);
+}
+
static int virtcrypto_probe(struct virtio_device *vdev)
{
int err = -EFAULT;
@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
if (err)
goto free_engines;
+ INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
+
return 0;
free_engines:
@@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+ flush_work(&vcrypto->config_work);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
virtio_reset_device(vdev);
@@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
- virtcrypto_update_status(vcrypto);
+ schedule_work(&vcrypto->config_work);
}
#ifdef CONFIG_PM_SLEEP
@@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
+ flush_work(&vcrypto->config_work);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
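
The virtio config-change callback now only schedules work; the status update runs from the workqueue, and the remove and freeze paths flush it before resetting the device so no update races the reset. The pattern in isolation (struct and handler names hypothetical):

    static void cfg_changed_work(struct work_struct *work)
    {
    	struct my_vdev *d = container_of(work, struct my_vdev, config_work);

    	update_device_status(d);	/* the deferred, sleepable part */
    }

    /* At probe time. */
    INIT_WORK(&d->config_work, cfg_changed_work);

    /* From the config-change notification. */
    schedule_work(&d->config_work);

    /* Before reset in the remove and freeze paths. */
    flush_work(&d->config_work);
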
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 110e99b86a66..5a7f3fabee22 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -561,4 +561,16 @@ config EDAC_NPCM
error detection (in-line ECC in which a section 1/8th of the memory
device used to store data is used for ECC storage).
+config EDAC_VERSAL
+ tristate "Xilinx Versal DDR Memory Controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ Support for error detection and correction on the Xilinx Versal DDR
+ memory controller.
+
+ It reports both single bit errors (CE) and double bit errors (UE),
+ and supports injecting correctable and uncorrectable errors for
+ debugging purposes.
+
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 61945d3113cc..9c09893695b7 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -86,3 +86,4 @@ obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
+obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
new file mode 100644
index 000000000000..87e730dfefa0
--- /dev/null
+++ b/drivers/edac/versal_edac.c
@@ -0,0 +1,1069 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Versal memory controller driver
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
+
+#include "edac_module.h"
+
+/* Granularity of reported error in bytes */
+#define XDDR_EDAC_ERR_GRAIN 1
+
+#define XDDR_EDAC_MSG_SIZE 256
+#define EVENT 2
+
+#define XDDR_PCSR_OFFSET 0xC
+#define XDDR_ISR_OFFSET 0x14
+#define XDDR_IRQ_EN_OFFSET 0x20
+#define XDDR_IRQ1_EN_OFFSET 0x2C
+#define XDDR_IRQ_DIS_OFFSET 0x24
+#define XDDR_IRQ_CE_MASK GENMASK(18, 15)
+#define XDDR_IRQ_UE_MASK GENMASK(14, 11)
+
+#define XDDR_REG_CONFIG0_OFFSET 0x258
+#define XDDR_REG_CONFIG0_BUS_WIDTH_MASK GENMASK(19, 18)
+#define XDDR_REG_CONFIG0_NUM_CHANS_MASK BIT(17)
+#define XDDR_REG_CONFIG0_NUM_RANKS_MASK GENMASK(15, 14)
+#define XDDR_REG_CONFIG0_SIZE_MASK GENMASK(10, 8)
+
+#define XDDR_REG_PINOUT_OFFSET 0x25C
+#define XDDR_REG_PINOUT_ECC_EN_MASK GENMASK(7, 5)
+
+#define ECCW0_FLIP_CTRL 0x109C
+#define ECCW0_FLIP0_OFFSET 0x10A0
+#define ECCW1_FLIP_CTRL 0x10AC
+#define ECCW1_FLIP0_OFFSET 0x10B0
+#define ECCR0_CERR_STAT_OFFSET 0x10BC
+#define ECCR0_CE_ADDR_LO_OFFSET 0x10C0
+#define ECCR0_CE_ADDR_HI_OFFSET 0x10C4
+#define ECCR0_CE_DATA_LO_OFFSET 0x10C8
+#define ECCR0_CE_DATA_HI_OFFSET 0x10CC
+#define ECCR0_CE_DATA_PAR_OFFSET 0x10D0
+
+#define ECCR0_UERR_STAT_OFFSET 0x10D4
+#define ECCR0_UE_ADDR_LO_OFFSET 0x10D8
+#define ECCR0_UE_ADDR_HI_OFFSET 0x10DC
+#define ECCR0_UE_DATA_LO_OFFSET 0x10E0
+#define ECCR0_UE_DATA_HI_OFFSET 0x10E4
+#define ECCR0_UE_DATA_PAR_OFFSET 0x10E8
+
+#define ECCR1_CERR_STAT_OFFSET 0x10F4
+#define ECCR1_CE_ADDR_LO_OFFSET 0x10F8
+#define ECCR1_CE_ADDR_HI_OFFSET 0x10FC
+#define ECCR1_CE_DATA_LO_OFFSET 0x1100
+#define ECCR1_CE_DATA_HI_OFFSET 0x1104
+#define ECCR1_CE_DATA_PAR_OFFSET 0x1108
+
+#define ECCR1_UERR_STAT_OFFSET 0x110C
+#define ECCR1_UE_ADDR_LO_OFFSET 0x1110
+#define ECCR1_UE_ADDR_HI_OFFSET 0x1114
+#define ECCR1_UE_DATA_LO_OFFSET 0x1118
+#define ECCR1_UE_DATA_HI_OFFSET 0x111C
+#define ECCR1_UE_DATA_PAR_OFFSET 0x1120
+
+#define XDDR_NOC_REG_ADEC4_OFFSET 0x44
+#define RANK_1_MASK GENMASK(11, 6)
+#define LRANK_0_MASK GENMASK(17, 12)
+#define LRANK_1_MASK GENMASK(23, 18)
+#define MASK_24 GENMASK(29, 24)
+
+#define XDDR_NOC_REG_ADEC5_OFFSET 0x48
+#define XDDR_NOC_REG_ADEC6_OFFSET 0x4C
+#define XDDR_NOC_REG_ADEC7_OFFSET 0x50
+#define XDDR_NOC_REG_ADEC8_OFFSET 0x54
+#define XDDR_NOC_REG_ADEC9_OFFSET 0x58
+#define XDDR_NOC_REG_ADEC10_OFFSET 0x5C
+
+#define XDDR_NOC_REG_ADEC11_OFFSET 0x60
+#define MASK_0 GENMASK(5, 0)
+#define GRP_0_MASK GENMASK(11, 6)
+#define GRP_1_MASK GENMASK(17, 12)
+#define CH_0_MASK GENMASK(23, 18)
+
+#define XDDR_NOC_REG_ADEC12_OFFSET 0x71C
+#define XDDR_NOC_REG_ADEC13_OFFSET 0x720
+
+#define XDDR_NOC_REG_ADEC14_OFFSET 0x724
+#define XDDR_NOC_ROW_MATCH_MASK GENMASK(17, 0)
+#define XDDR_NOC_COL_MATCH_MASK GENMASK(27, 18)
+#define XDDR_NOC_BANK_MATCH_MASK GENMASK(29, 28)
+#define XDDR_NOC_GRP_MATCH_MASK GENMASK(31, 30)
+
+#define XDDR_NOC_REG_ADEC15_OFFSET 0x728
+#define XDDR_NOC_RANK_MATCH_MASK GENMASK(1, 0)
+#define XDDR_NOC_LRANK_MATCH_MASK GENMASK(4, 2)
+#define XDDR_NOC_CH_MATCH_MASK BIT(5)
+#define XDDR_NOC_MOD_SEL_MASK BIT(6)
+#define XDDR_NOC_MATCH_EN_MASK BIT(8)
+
+#define ECCR_UE_CE_ADDR_HI_ROW_MASK GENMASK(7, 0)
+
+#define XDDR_EDAC_NR_CSROWS 1
+#define XDDR_EDAC_NR_CHANS 1
+
+#define XDDR_BUS_WIDTH_64 0
+#define XDDR_BUS_WIDTH_32 1
+#define XDDR_BUS_WIDTH_16 2
+
+#define ECC_CEPOISON_MASK 0x1
+#define ECC_UEPOISON_MASK 0x3
+
+#define XDDR_MAX_ROW_CNT 18
+#define XDDR_MAX_COL_CNT 10
+#define XDDR_MAX_RANK_CNT 2
+#define XDDR_MAX_LRANK_CNT 3
+#define XDDR_MAX_BANK_CNT 2
+#define XDDR_MAX_GRP_CNT 2
+
+/*
+ * Config and system registers are usually locked. This is the
+ * code which unlocks them in order to accept writes. See
+ *
+ * https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
+ */
+#define PCSR_UNLOCK_VAL 0xF9E8D7C6
+#define XDDR_ERR_TYPE_CE 0
+#define XDDR_ERR_TYPE_UE 1
+
+#define XILINX_DRAM_SIZE_4G 0
+#define XILINX_DRAM_SIZE_6G 1
+#define XILINX_DRAM_SIZE_8G 2
+#define XILINX_DRAM_SIZE_12G 3
+#define XILINX_DRAM_SIZE_16G 4
+#define XILINX_DRAM_SIZE_32G 5
+
+/**
+ * union ecc_error_info - ECC error log information.
+ * @burstpos: Burst position.
+ * @lrank: Logical Rank number.
+ * @rank: Rank number.
+ * @group: Group number.
+ * @bank: Bank number.
+ * @col: Column number.
+ * @row: Row number.
+ * @rowhi: Row number higher bits.
+ * @i: ECC error info.
+ */
+union ecc_error_info {
+ struct {
+ u32 burstpos:3;
+ u32 lrank:3;
+ u32 rank:2;
+ u32 group:2;
+ u32 bank:2;
+ u32 col:10;
+ u32 row:10;
+ u32 rowhi;
+ };
+ u64 i;
+} __packed;
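+
+/*
+ * The error loggers below assemble @i as ADDR_LO in the low word and the
+ * row bits of ADDR_HI (ECCR_UE_CE_ADDR_HI_ROW_MASK) in the high word, so
+ * these bitfields decode the packed hardware log directly; @rowhi holds
+ * the row bits that do not fit in @row.
+ */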
+
+union edac_info {
+ struct {
+ u32 row0:6;
+ u32 row1:6;
+ u32 row2:6;
+ u32 row3:6;
+ u32 row4:6;
+ u32 reserved:2;
+ };
+ struct {
+ u32 col1:6;
+ u32 col2:6;
+ u32 col3:6;
+ u32 col4:6;
+ u32 col5:6;
+ u32 reservedcol:2;
+ };
+ u32 i;
+} __packed;
+
+/**
+ * struct ecc_status - ECC status information to report.
+ * @ceinfo: Correctable error log information.
+ * @ueinfo: Uncorrectable error log information.
+ * @channel: Channel number.
+ * @error_type: Error type information.
+ */
+struct ecc_status {
+ union ecc_error_info ceinfo[2];
+ union ecc_error_info ueinfo[2];
+ u8 channel;
+ u8 error_type;
+};
+
+/**
+ * struct edac_priv - DDR memory controller private instance data.
+ * @ddrmc_baseaddr: Base address of the DDR controller.
+ * @ddrmc_noc_baseaddr: Base address of the DDRMC NOC.
+ * @message: Buffer for framing the event specific info.
+ * @mc_id: Memory controller ID.
+ * @ce_cnt: Correctable error count.
+ * @ue_cnt: Uncorrectable error count.
+ * @stat: ECC status information.
+ * @lrank_bit: Bit shifts for lrank bit.
+ * @rank_bit: Bit shifts for rank bit.
+ * @row_bit: Bit shifts for row bit.
+ * @col_bit: Bit shifts for column bit.
+ * @bank_bit: Bit shifts for bank bit.
+ * @grp_bit: Bit shifts for group bit.
+ * @ch_bit: Bit shifts for channel bit.
+ * @err_inject_addr: Data poison address.
+ * @debugfs: Debugfs handle.
+ */
+struct edac_priv {
+ void __iomem *ddrmc_baseaddr;
+ void __iomem *ddrmc_noc_baseaddr;
+ char message[XDDR_EDAC_MSG_SIZE];
+ u32 mc_id;
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_status stat;
+ u32 lrank_bit[3];
+ u32 rank_bit[2];
+ u32 row_bit[18];
+ u32 col_bit[10];
+ u32 bank_bit[2];
+ u32 grp_bit[2];
+ u32 ch_bit;
+#ifdef CONFIG_EDAC_DEBUG
+ u64 err_inject_addr;
+ struct dentry *debugfs;
+#endif
+};
+
+static void get_ce_error_info(struct edac_priv *priv)
+{
+ void __iomem *ddrmc_base;
+ struct ecc_status *p;
+ u32 regval;
+ u64 reghi;
+
+ ddrmc_base = priv->ddrmc_baseaddr;
+ p = &priv->stat;
+
+ p->error_type = XDDR_ERR_TYPE_CE;
+ regval = readl(ddrmc_base + ECCR0_CE_ADDR_LO_OFFSET);
+ reghi = readl(ddrmc_base + ECCR0_CE_ADDR_HI_OFFSET) &
+         ECCR_UE_CE_ADDR_HI_ROW_MASK;
+ p->ceinfo[0].i = regval | reghi << 32;
+
+ edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
+ readl(ddrmc_base + ECCR0_CE_DATA_LO_OFFSET),
+ readl(ddrmc_base + ECCR0_CE_DATA_HI_OFFSET),
+ readl(ddrmc_base + ECCR0_CE_DATA_PAR_OFFSET));
+
+ regval = readl(ddrmc_base + ECCR1_CE_ADDR_LO_OFFSET);
+ reghi = readl(ddrmc_base + ECCR1_CE_ADDR_HI_OFFSET) &
+         ECCR_UE_CE_ADDR_HI_ROW_MASK;
+ p->ceinfo[1].i = regval | reghi << 32;
+
+ edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
+ readl(ddrmc_base + ECCR1_CE_DATA_LO_OFFSET),
+ readl(ddrmc_base + ECCR1_CE_DATA_HI_OFFSET),
+ readl(ddrmc_base + ECCR1_CE_DATA_PAR_OFFSET));
+}
+
+static void get_ue_error_info(struct edac_priv *priv)
+{
+ void __iomem *ddrmc_base;
+ struct ecc_status *p;
+ u32 regval;
+ u64 reghi;
+
+ ddrmc_base = priv->ddrmc_baseaddr;
+ p = &priv->stat;
+
+ p->error_type = XDDR_ERR_TYPE_UE;
+ regval = readl(ddrmc_base + ECCR0_UE_ADDR_LO_OFFSET);
+ reghi = readl(ddrmc_base + ECCR0_UE_ADDR_HI_OFFSET) &
+         ECCR_UE_CE_ADDR_HI_ROW_MASK;
+
+ p->ueinfo[0].i = regval | reghi << 32;
+
+ edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
+ readl(ddrmc_base + ECCR0_UE_DATA_LO_OFFSET),
+ readl(ddrmc_base + ECCR0_UE_DATA_HI_OFFSET),
+ readl(ddrmc_base + ECCR0_UE_DATA_PAR_OFFSET));
+
+ regval = readl(ddrmc_base + ECCR1_UE_ADDR_LO_OFFSET);
+ reghi = readl(ddrmc_base + ECCR1_UE_ADDR_HI_OFFSET) &
+         ECCR_UE_CE_ADDR_HI_ROW_MASK;
+ p->ueinfo[1].i = regval | reghi << 32;
+
+ edac_dbg(2, "ERR DATA: 0x%08X%08X ERR DATA PARITY: 0x%08X\n",
+ readl(ddrmc_base + ECCR1_UE_DATA_LO_OFFSET),
+ readl(ddrmc_base + ECCR1_UE_DATA_HI_OFFSET),
+ readl(ddrmc_base + ECCR1_UE_DATA_PAR_OFFSET));
+}
+
+static bool get_error_info(struct edac_priv *priv)
+{
+ u32 eccr0_ceval, eccr1_ceval, eccr0_ueval, eccr1_ueval;
+ void __iomem *ddrmc_base;
+ struct ecc_status *p;
+
+ ddrmc_base = priv->ddrmc_baseaddr;
+ p = &priv->stat;
+
+ eccr0_ceval = readl(ddrmc_base + ECCR0_CERR_STAT_OFFSET);
+ eccr1_ceval = readl(ddrmc_base + ECCR1_CERR_STAT_OFFSET);
+ eccr0_ueval = readl(ddrmc_base + ECCR0_UERR_STAT_OFFSET);
+ eccr1_ueval = readl(ddrmc_base + ECCR1_UERR_STAT_OFFSET);
+
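+ /* true here means "no CE/UE status latched", i.e. nothing to report */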
+ if (!eccr0_ceval && !eccr1_ceval && !eccr0_ueval && !eccr1_ueval)
+ return true;
+ if (!eccr0_ceval)
+ p->channel = 1;
+ else
+ p->channel = 0;
+
+ if (eccr0_ceval || eccr1_ceval)
+ get_ce_error_info(priv);
+
+ if (eccr0_ueval || eccr1_ueval) {
+ if (!eccr0_ueval)
+ p->channel = 1;
+ else
+ p->channel = 0;
+ get_ue_error_info(priv);
+ }
+
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, ddrmc_base + XDDR_PCSR_OFFSET);
+
+ writel(0, ddrmc_base + ECCR0_CERR_STAT_OFFSET);
+ writel(0, ddrmc_base + ECCR1_CERR_STAT_OFFSET);
+ writel(0, ddrmc_base + ECCR0_UERR_STAT_OFFSET);
+ writel(0, ddrmc_base + ECCR1_UERR_STAT_OFFSET);
+
+ /* Lock the PCSR registers */
+ writel(1, ddrmc_base + XDDR_PCSR_OFFSET);
+
+ return false;
+}
+
+/**
+ * convert_to_physical - Convert to physical address.
+ * @priv: DDR memory controller private instance data.
+ * @pinf: ECC error info structure.
+ *
+ * Return: Physical address of the DDR memory.
+ */
+static unsigned long convert_to_physical(struct edac_priv *priv, union ecc_error_info pinf)
+{
+ unsigned long err_addr = 0;
+ u32 index;
+ u32 row;
+
+ row = pinf.rowhi << 10 | pinf.row;
+ for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
+ err_addr |= (row & BIT(0)) << priv->row_bit[index];
+ row >>= 1;
+ }
+
+ for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
+ err_addr |= (pinf.col & BIT(0)) << priv->col_bit[index];
+ pinf.col >>= 1;
+ }
+
+ for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
+ err_addr |= (pinf.bank & BIT(0)) << priv->bank_bit[index];
+ pinf.bank >>= 1;
+ }
+
+ for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
+ err_addr |= (pinf.group & BIT(0)) << priv->grp_bit[index];
+ pinf.group >>= 1;
+ }
+
+ for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
+ err_addr |= (pinf.rank & BIT(0)) << priv->rank_bit[index];
+ pinf.rank >>= 1;
+ }
+
+ for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
+ err_addr |= (pinf.lrank & BIT(0)) << priv->lrank_bit[index];
+ pinf.lrank >>= 1;
+ }
+
+ err_addr |= (priv->stat.channel & BIT(0)) << priv->ch_bit;
+
+ return err_addr;
+}
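+
+/*
+ * Example of the scatter above: with row_bit = {5, 6, 7, ...} a logged
+ * row of 0b101 lands as BIT(5) | BIT(7) in err_addr. The per-field bit
+ * positions come from the ADEC address-decode registers, so this routine
+ * is the inverse of the controller's address map.
+ */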
+
+/**
+ * handle_error - Handle Correctable and Uncorrectable errors.
+ * @mci: EDAC memory controller instance.
+ * @stat: ECC status structure.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat)
+{
+ struct edac_priv *priv = mci->pvt_info;
+ union ecc_error_info pinf;
+
+ if (stat->error_type == XDDR_ERR_TYPE_CE) {
+ priv->ce_cnt++;
+ pinf = stat->ceinfo[stat->channel];
+ snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
+ "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
+ "CE", priv->mc_id,
+ convert_to_physical(priv, pinf), pinf.burstpos);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ priv->ce_cnt, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (stat->error_type == XDDR_ERR_TYPE_UE) {
+ priv->ue_cnt++;
+ pinf = stat->ueinfo[stat->channel];
+ snprintf(priv->message, XDDR_EDAC_MSG_SIZE,
+ "Error type:%s MC ID: %d Addr at %lx Burst Pos: %d\n",
+ "UE", priv->mc_id,
+ convert_to_physical(priv, pinf), pinf.burstpos);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ priv->ue_cnt, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ memset(stat, 0, sizeof(*stat));
+}
+
+/**
+ * err_callback - Handle Correctable and Uncorrectable errors.
+ * @payload: payload data.
+ * @data: mci controller data.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void err_callback(const u32 *payload, void *data)
+{
+ struct mem_ctl_info *mci = (struct mem_ctl_info *)data;
+ struct edac_priv *priv;
+ struct ecc_status *p;
+ u32 regval;
+
+ priv = mci->pvt_info;
+ p = &priv->stat;
+
+ regval = readl(priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
+
+ if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_CR)
+ p->error_type = XDDR_ERR_TYPE_CE;
+ if (payload[EVENT] == XPM_EVENT_ERROR_MASK_DDRMC_NCR)
+ p->error_type = XDDR_ERR_TYPE_UE;
+
+ if (get_error_info(priv))
+ return;
+
+ handle_error(mci, &priv->stat);
+
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+
+ /* Clear the ISR */
+ writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
+
+ /* Lock the PCSR registers */
+ writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ edac_dbg(3, "Total error count CE %d UE %d\n",
+ priv->ce_cnt, priv->ue_cnt);
+}
+
+/**
+ * get_dwidth - Return the controller memory width.
+ * @base: DDR memory controller base address.
+ *
+ * Get the EDAC device type width appropriate for the controller
+ * configuration.
+ *
+ * Return: a device type width enumeration.
+ */
+static enum dev_type get_dwidth(const void __iomem *base)
+{
+ enum dev_type dt;
+ u32 regval;
+ u32 width;
+
+ regval = readl(base + XDDR_REG_CONFIG0_OFFSET);
+ width = FIELD_GET(XDDR_REG_CONFIG0_BUS_WIDTH_MASK, regval);
+
+ switch (width) {
+ case XDDR_BUS_WIDTH_16:
+ dt = DEV_X2;
+ break;
+ case XDDR_BUS_WIDTH_32:
+ dt = DEV_X4;
+ break;
+ case XDDR_BUS_WIDTH_64:
+ dt = DEV_X8;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ return dt;
+}
+
+/**
+ * get_ecc_state - Return the controller ECC enable/disable status.
+ * @base: DDR memory controller base address.
+ *
+ * Get the ECC enable/disable status for the controller.
+ *
+ * Return: true if ECC is enabled, false otherwise.
+ */
+static bool get_ecc_state(void __iomem *base)
+{
+ enum dev_type dt;
+ u32 ecctype;
+
+ dt = get_dwidth(base);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(base + XDDR_REG_PINOUT_OFFSET);
+ ecctype &= XDDR_REG_PINOUT_ECC_EN_MASK;
+
+ return !!ecctype;
+}
+
+/**
+ * get_memsize - Get the size of the attached memory device.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Return: the memory size in bytes.
+ */
+static u64 get_memsize(struct edac_priv *priv)
+{
+ u32 regval;
+ u64 size;
+
+ regval = readl(priv->ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
+ regval = FIELD_GET(XDDR_REG_CONFIG0_SIZE_MASK, regval);
+
+ switch (regval) {
+ case XILINX_DRAM_SIZE_4G:
+ size = 4U;
+ break;
+ case XILINX_DRAM_SIZE_6G:
+ size = 6U;
+ break;
+ case XILINX_DRAM_SIZE_8G:
+ size = 8U;
+ break;
+ case XILINX_DRAM_SIZE_12G:
+ size = 12U;
+ break;
+ case XILINX_DRAM_SIZE_16G:
+ size = 16U;
+ break;
+ case XILINX_DRAM_SIZE_32G:
+ size = 32U;
+ break;
+ /* Invalid configuration */
+ default:
+ size = 0;
+ break;
+ }
+
+ size *= SZ_1G;
+ return size;
+}
+
+/**
+ * init_csrows - Initialize the csrow data.
+ * @mci: EDAC memory controller instance.
+ *
+ * Initialize the chip select rows associated with the EDAC memory
+ * controller instance.
+ */
+static void init_csrows(struct mem_ctl_info *mci)
+{
+ struct edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ unsigned long size;
+ u32 row;
+ int ch;
+
+ size = get_memsize(priv);
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_DDR4;
+ dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
+ dimm->grain = XDDR_EDAC_ERR_GRAIN;
+ dimm->dtype = get_dwidth(priv->ddrmc_baseaddr);
+ }
+ }
+}
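As a worked check of the nr_pages arithmetic above (numbers illustrative, not from the patch): with an 8 GiB device, 4 KiB pages (PAGE_SHIFT = 12) and two channels, each dimm gets (8 << 30 >> 12) / 2 = 2097152 / 2 = 1048576 pages.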
+
+/**
+ * mc_init - Initialize one driver instance.
+ * @mci: EDAC memory controller instance.
+ * @pdev: platform device.
+ *
+ * Perform initialization of the EDAC memory controller instance and
+ * related driver-private data associated with the memory controller the
+ * instance is bound to.
+ */
+static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
+{
+ mci->pdev = &pdev->dev;
+ platform_set_drvdata(pdev, mci);
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "xlnx_ddr_controller";
+ mci->dev_name = dev_name(&pdev->dev);
+ mci->mod_name = "xlnx_edac";
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ init_csrows(mci);
+}
+
+static void enable_intr(struct edac_priv *priv)
+{
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+
+ /* Enable UE and CE Interrupts to support the interrupt case */
+ writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
+ priv->ddrmc_baseaddr + XDDR_IRQ_EN_OFFSET);
+
+ writel(XDDR_IRQ_UE_MASK,
+ priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
+ /* Lock the PCSR registers */
+ writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+}
+
+static void disable_intr(struct edac_priv *priv)
+{
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+
+ /* Disable UE/CE Interrupts */
+ writel(XDDR_IRQ_CE_MASK | XDDR_IRQ_UE_MASK,
+ priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);
+
+ /* Lock the PCSR registers */
+ writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+}
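The unlock/program/lock sequence around the PCSR-protected registers recurs in err_callback(), enable_intr() and disable_intr(). A minimal sketch of a helper capturing the pattern, using the driver's own macros but a hypothetical function name:

static void xddr_pcsr_write(struct edac_priv *priv, u32 offset, u32 val)
{
	/* Unlock the PCSR registers, program the target, lock again */
	writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
	writel(val, priv->ddrmc_baseaddr + offset);
	writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}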
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+#ifdef CONFIG_EDAC_DEBUG
+/**
+ * poison_setup - Update poison registers.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Update the poison registers as per the DDR address mapping when the
+ * address at which the fault is to be injected is written.
+ * Return: none.
+ */
+static void poison_setup(struct edac_priv *priv)
+{
+ u32 col = 0, row = 0, bank = 0, grp = 0, rank = 0, lrank = 0, ch = 0;
+ u32 index, regval;
+
+ for (index = 0; index < XDDR_MAX_ROW_CNT; index++) {
+ row |= (((priv->err_inject_addr >> priv->row_bit[index]) &
+ BIT(0)) << index);
+ }
+
+ for (index = 0; index < XDDR_MAX_COL_CNT; index++) {
+ col |= (((priv->err_inject_addr >> priv->col_bit[index]) &
+ BIT(0)) << index);
+ }
+
+ for (index = 0; index < XDDR_MAX_BANK_CNT; index++) {
+ bank |= (((priv->err_inject_addr >> priv->bank_bit[index]) &
+ BIT(0)) << index);
+ }
+
+ for (index = 0; index < XDDR_MAX_GRP_CNT; index++) {
+ grp |= (((priv->err_inject_addr >> priv->grp_bit[index]) &
+ BIT(0)) << index);
+ }
+
+ for (index = 0; index < XDDR_MAX_RANK_CNT; index++) {
+ rank |= (((priv->err_inject_addr >> priv->rank_bit[index]) &
+ BIT(0)) << index);
+ }
+
+ for (index = 0; index < XDDR_MAX_LRANK_CNT; index++) {
+ lrank |= (((priv->err_inject_addr >> priv->lrank_bit[index]) &
+ BIT(0)) << index);
+ }
+
+ ch = (priv->err_inject_addr >> priv->ch_bit) & BIT(0);
+ if (ch)
+ writel(0xFF, priv->ddrmc_baseaddr + ECCW1_FLIP_CTRL);
+ else
+ writel(0xFF, priv->ddrmc_baseaddr + ECCW0_FLIP_CTRL);
+
+ writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC12_OFFSET);
+ writel(0, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC13_OFFSET);
+
+ regval = row & XDDR_NOC_ROW_MATCH_MASK;
+ regval |= FIELD_PREP(XDDR_NOC_COL_MATCH_MASK, col);
+ regval |= FIELD_PREP(XDDR_NOC_BANK_MATCH_MASK, bank);
+ regval |= FIELD_PREP(XDDR_NOC_GRP_MATCH_MASK, grp);
+ writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC14_OFFSET);
+
+ regval = rank & XDDR_NOC_RANK_MATCH_MASK;
+ regval |= FIELD_PREP(XDDR_NOC_LRANK_MATCH_MASK, lrank);
+ regval |= FIELD_PREP(XDDR_NOC_CH_MATCH_MASK, ch);
+ regval |= (XDDR_NOC_MOD_SEL_MASK | XDDR_NOC_MATCH_EN_MASK);
+ writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
+}
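The six loops above all perform the same bit-gather: pick single bits of the injection address at the positions recorded by the address map and pack them into a contiguous field. A minimal sketch, assuming a hypothetical helper name and u32 bit-position arrays:

static u32 gather_bits(u64 addr, const u32 *bitpos, unsigned int nbits)
{
	u32 val = 0;
	unsigned int i;

	/* Bit i of the result comes from address bit bitpos[i] */
	for (i = 0; i < nbits; i++)
		val |= ((addr >> bitpos[i]) & 1) << i;

	return val;
}

With it, the row loop reduces to row = gather_bits(priv->err_inject_addr, priv->row_bit, XDDR_MAX_ROW_CNT), and likewise for the other fields.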
+
+static ssize_t xddr_inject_data_poison_store(struct mem_ctl_info *mci,
+ const char __user *data)
+{
+ struct edac_priv *priv = mci->pvt_info;
+
+ writel(0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+ writel(0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
+
+ if (strncmp(data, "CE", 2) == 0) {
+ writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
+ ECCW0_FLIP0_OFFSET);
+ writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
+ ECCW1_FLIP0_OFFSET);
+ } else {
+ writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
+ ECCW0_FLIP0_OFFSET);
+ writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
+ ECCW1_FLIP0_OFFSET);
+ }
+
+ /* Lock the PCSR registers */
+ writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+
+ return 0;
+}
+
+static ssize_t inject_data_poison_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct edac_priv *priv = mci->pvt_info;
+
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+
+ poison_setup(priv);
+
+ /* Lock the PCSR registers */
+ writel(1, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+
+ xddr_inject_data_poison_store(mci, data);
+
+ return count;
+}
+
+static const struct file_operations xddr_inject_enable_fops = {
+ .open = simple_open,
+ .write = inject_data_poison_store,
+ .llseek = generic_file_llseek,
+};
+
+static void create_debugfs_attributes(struct mem_ctl_info *mci)
+{
+ struct edac_priv *priv = mci->pvt_info;
+
+ priv->debugfs = edac_debugfs_create_dir(mci->dev_name);
+ if (!priv->debugfs)
+ return;
+
+ edac_debugfs_create_file("inject_error", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_enable_fops);
+ debugfs_create_x64("address", 0600, priv->debugfs,
+ &priv->err_inject_addr);
+ mci->debugfs = priv->debugfs;
+}
+
+static inline void process_bit(struct edac_priv *priv, unsigned int start, u32 regval)
+{
+ union edac_info rows;
+
+ rows.i = regval;
+ priv->row_bit[start] = rows.row0;
+ priv->row_bit[start + 1] = rows.row1;
+ priv->row_bit[start + 2] = rows.row2;
+ priv->row_bit[start + 3] = rows.row3;
+ priv->row_bit[start + 4] = rows.row4;
+}
+
+static void setup_row_address_map(struct edac_priv *priv)
+{
+ u32 regval;
+ union edac_info rows;
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC5_OFFSET);
+ process_bit(priv, 0, regval);
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC6_OFFSET);
+ process_bit(priv, 5, regval);
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC7_OFFSET);
+ process_bit(priv, 10, regval);
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
+ rows.i = regval;
+
+ priv->row_bit[15] = rows.row0;
+ priv->row_bit[16] = rows.row1;
+ priv->row_bit[17] = rows.row2;
+}
+
+static void setup_column_address_map(struct edac_priv *priv)
+{
+ u32 regval;
+ union edac_info cols;
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC8_OFFSET);
+ priv->col_bit[0] = FIELD_GET(MASK_24, regval);
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC9_OFFSET);
+ cols.i = regval;
+ priv->col_bit[1] = cols.col1;
+ priv->col_bit[2] = cols.col2;
+ priv->col_bit[3] = cols.col3;
+ priv->col_bit[4] = cols.col4;
+ priv->col_bit[5] = cols.col5;
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
+ cols.i = regval;
+ priv->col_bit[6] = cols.col1;
+ priv->col_bit[7] = cols.col2;
+ priv->col_bit[8] = cols.col3;
+ priv->col_bit[9] = cols.col4;
+}
+
+static void setup_bank_grp_ch_address_map(struct edac_priv *priv)
+{
+ u32 regval;
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC10_OFFSET);
+ priv->bank_bit[0] = FIELD_GET(MASK_24, regval);
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC11_OFFSET);
+ priv->bank_bit[1] = (regval & MASK_0);
+ priv->grp_bit[0] = FIELD_GET(GRP_0_MASK, regval);
+ priv->grp_bit[1] = FIELD_GET(GRP_1_MASK, regval);
+ priv->ch_bit = FIELD_GET(CH_0_MASK, regval);
+}
+
+static void setup_rank_lrank_address_map(struct edac_priv *priv)
+{
+ u32 regval;
+
+ regval = readl(priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC4_OFFSET);
+ priv->rank_bit[0] = (regval & MASK_0);
+ priv->rank_bit[1] = FIELD_GET(RANK_1_MASK, regval);
+ priv->lrank_bit[0] = FIELD_GET(LRANK_0_MASK, regval);
+ priv->lrank_bit[1] = FIELD_GET(LRANK_1_MASK, regval);
+ priv->lrank_bit[2] = FIELD_GET(MASK_24, regval);
+}
+
+/**
+ * setup_address_map - Set Address Map by querying ADDRMAP registers.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Set Address Map by querying ADDRMAP registers.
+ *
+ * Return: none.
+ */
+static void setup_address_map(struct edac_priv *priv)
+{
+ setup_row_address_map(priv);
+
+ setup_column_address_map(priv);
+
+ setup_bank_grp_ch_address_map(priv);
+
+ setup_rank_lrank_address_map(priv);
+}
+#endif /* CONFIG_EDAC_DEBUG */
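The ADEC decode above relies on union edac_info, defined earlier in the driver, to slice each 32-bit register into bit-position fields. A sketch of the assumed layout (five packed positions per register; the field widths are assumed, and the col1..col5 accessors alias the same slots):

union edac_info {
	u32 i;
	struct {
		u32 row0:6;
		u32 row1:6;
		u32 row2:6;
		u32 row3:6;
		u32 row4:6;
		u32 reserved:2;
	};
};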
+
+static const struct of_device_id xlnx_edac_match[] = {
+ { .compatible = "xlnx,versal-ddrmc", },
+ {
+ /* end of table */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, xlnx_edac_match);
+
+static u32 emif_get_id(struct device_node *node)
+{
+ u32 addr, my_addr, my_id = 0;
+ struct device_node *np;
+ const __be32 *addrp;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ my_addr = (u32)of_translate_address(node, addrp);
+
+ for_each_matching_node(np, xlnx_edac_match) {
+ if (np == node)
+ continue;
+
+ addrp = of_get_address(np, 0, NULL, NULL);
+ addr = (u32)of_translate_address(np, addrp);
+
+ edac_printk(KERN_INFO, EDAC_MC,
+ "addr=%x, my_addr=%x\n",
+ addr, my_addr);
+
+ if (addr < my_addr)
+ my_id++;
+ }
+
+ return my_id;
+}
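As a hedged illustration of the ID scheme (addresses made up): with matching controllers at 0xf6150000, 0xf6280000 and 0xf63b0000, the node at 0xf6280000 counts exactly one lower base address and therefore gets ID 1, regardless of probe order.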
+
+static int mc_probe(struct platform_device *pdev)
+{
+ void __iomem *ddrmc_baseaddr, *ddrmc_noc_baseaddr;
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ u8 num_chans, num_csrows;
+ struct edac_priv *priv;
+ u32 edac_mc_id, regval;
+ int rc;
+
+ ddrmc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "base");
+ if (IS_ERR(ddrmc_baseaddr))
+ return PTR_ERR(ddrmc_baseaddr);
+
+ ddrmc_noc_baseaddr = devm_platform_ioremap_resource_byname(pdev, "noc");
+ if (IS_ERR(ddrmc_noc_baseaddr))
+ return PTR_ERR(ddrmc_noc_baseaddr);
+
+ if (!get_ecc_state(ddrmc_baseaddr))
+ return -ENXIO;
+
+ /* Allocate ID number for the EMIF controller */
+ edac_mc_id = emif_get_id(pdev->dev.of_node);
+
+ regval = readl(ddrmc_baseaddr + XDDR_REG_CONFIG0_OFFSET);
+ num_chans = FIELD_GET(XDDR_REG_CONFIG0_NUM_CHANS_MASK, regval);
+ num_chans++;
+
+ num_csrows = FIELD_GET(XDDR_REG_CONFIG0_NUM_RANKS_MASK, regval);
+ num_csrows *= 2;
+ if (!num_csrows)
+ num_csrows = 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = num_csrows;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+
+ mci = edac_mc_alloc(edac_mc_id, ARRAY_SIZE(layers), layers,
+ sizeof(struct edac_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed memory allocation for mc instance\n");
+ return -ENOMEM;
+ }
+
+ priv = mci->pvt_info;
+ priv->ddrmc_baseaddr = ddrmc_baseaddr;
+ priv->ddrmc_noc_baseaddr = ddrmc_noc_baseaddr;
+ priv->ce_cnt = 0;
+ priv->ue_cnt = 0;
+ priv->mc_id = edac_mc_id;
+
+ mc_init(mci, pdev);
+
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to register with EDAC core\n");
+ goto free_edac_mc;
+ }
+
+ rc = xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+ XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR |
+ XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR,
+ false, err_callback, mci);
+ if (rc) {
+ if (rc == -EACCES)
+ rc = -EPROBE_DEFER;
+
+ goto del_mc;
+ }
+
+#ifdef CONFIG_EDAC_DEBUG
+ create_debugfs_attributes(mci);
+ setup_address_map(priv);
+#endif
+ enable_intr(priv);
+ return rc;
+
+del_mc:
+ edac_mc_del_mc(&pdev->dev);
+free_edac_mc:
+ edac_mc_free(mci);
+
+ return rc;
+}
+
+static int mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+ struct edac_priv *priv = mci->pvt_info;
+
+ disable_intr(priv);
+
+#ifdef CONFIG_EDAC_DEBUG
+ debugfs_remove_recursive(priv->debugfs);
+#endif
+
+ xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+ XPM_EVENT_ERROR_MASK_DDRMC_CR |
+ XPM_EVENT_ERROR_MASK_NOC_CR |
+ XPM_EVENT_ERROR_MASK_NOC_NCR |
+ XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+
+ return 0;
+}
+
+static struct platform_driver xilinx_ddr_edac_mc_driver = {
+ .driver = {
+ .name = "xilinx-ddrmc-edac",
+ .of_match_table = xlnx_edac_match,
+ },
+ .probe = mc_probe,
+ .remove = mc_remove,
+};
+
+module_platform_driver(xilinx_ddr_edac_mc_driver);
+
+MODULE_AUTHOR("AMD Inc");
+MODULE_DESCRIPTION("Xilinx DDRMC ECC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 749868b9e80d..7edf2c95282f 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1521,6 +1521,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
if (sbp2_param_exclusive_login) {
sdev->manage_system_start_stop = true;
sdev->manage_runtime_start_stop = true;
+ sdev->manage_shutdown = true;
}
if (sdev->type == TYPE_ROM)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ce20a60676f0..1974f0ad32ba 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -273,9 +273,13 @@ static __init int efivar_ssdt_load(void)
if (status == EFI_NOT_FOUND) {
break;
} else if (status == EFI_BUFFER_TOO_SMALL) {
- name = krealloc(name, name_size, GFP_KERNEL);
- if (!name)
+ efi_char16_t *name_tmp =
+ krealloc(name, name_size, GFP_KERNEL);
+ if (!name_tmp) {
+ kfree(name);
return -ENOMEM;
+ }
+ name = name_tmp;
continue;
}
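The efivar_ssdt_load() change plugs the classic krealloc() pitfall: on failure the old block stays allocated, so overwriting the only pointer to it with NULL leaks it. A generic sketch of the safe pattern (helper name hypothetical):

static int grow_buffer(void **bufp, size_t new_size)
{
	void *tmp = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!tmp) {
		kfree(*bufp);	/* old block is still valid; free it */
		*bufp = NULL;
		return -ENOMEM;
	}

	*bufp = tmp;
	return 0;
}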
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 2fee52ed335d..9d5df683f882 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -605,11 +605,8 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
break;
case EFI_UNACCEPTED_MEMORY:
- if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) {
- efi_warn_once(
-"The system has unaccepted memory, but kernel does not support it\nConsider enabling CONFIG_UNACCEPTED_MEMORY\n");
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
continue;
- }
e820_type = E820_TYPE_RAM;
process_unaccepted_memory(d->phys_addr,
d->phys_addr + PAGE_SIZE * d->num_pages);
@@ -852,6 +849,8 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
unsigned long kernel_entry;
efi_status_t status;
+ boot_params_pointer = boot_params;
+
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
index 37c5a36b9d8c..2748bca192df 100644
--- a/drivers/firmware/efi/libstub/x86-stub.h
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -2,6 +2,8 @@
#include <linux/efi.h>
+extern struct boot_params *boot_params_pointer asm("boot_params");
+
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 853f7dc3c21d..135278ddaf62 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -5,9 +5,17 @@
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
+struct accept_range {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
/*
* accept_memory() -- Consult bitmap and accept the memory if needed.
*
@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
+ struct accept_range range, *entry;
unsigned long flags;
u64 unit_size;
@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
- range_start = start / unit_size;
-
+ range.start = start / unit_size;
+ range.end = DIV_ROUND_UP(end, unit_size);
+retry:
spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+ /*
+ * Check if anybody else is already accepting the same range of memory.
+ *
+ * The check is done with unit_size granularity. It is crucial to catch
+ * all accept requests to the same unit_size block, even if they don't
+ * overlap on physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+ if (entry->end < range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
+
+ /*
+ * Somebody else is accepting the range, or at least part of it.
+ *
+ * Drop the lock and retry until it is complete.
+ */
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ goto retry;
+ }
+
+ /*
+ * Register that the range is about to be accepted.
+ * Make sure nobody else will accept it.
+ */
+ list_add(&range.list, &accepting_list);
+
+ range_start = range.start;
for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
- DIV_ROUND_UP(end, unit_size)) {
+ range.end) {
unsigned long phys_start, phys_end;
unsigned long len = range_end - range_start;
phys_start = range_start * unit_size + unaccepted->phys_base;
phys_end = range_end * unit_size + unaccepted->phys_base;
+ /*
+ * Keep interrupts disabled until the accept operation is
+ * complete in order to prevent deadlocks.
+ *
+ * Enabling interrupts before calling arch_accept_memory()
+ * creates an opportunity for an interrupt handler to request
+ * acceptance for the same memory. The handler will continuously
+ * spin with interrupts disabled, preventing other tasks from
+ * making progress with the acceptance process.
+ */
+ spin_unlock(&unaccepted_memory_lock);
+
arch_accept_memory(phys_start, phys_end);
+
+ spin_lock(&unaccepted_memory_lock);
bitmap_clear(unaccepted->bitmap, range_start, len);
}
+
+ list_del(&range.list);
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
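Spelled out, the two continue checks in the list walk above skip only entries that cannot share a unit_size granule with the request; everything else counts as overlap and forces the retry. A sketch of the predicate (helper name hypothetical):

static bool ranges_overlap(const struct accept_range *a,
			   const struct accept_range *b)
{
	/* Mirrors the walk: disjoint only if a ends before b starts,
	 * or a begins at or past b's end. */
	return a->end >= b->start && a->start < b->end;
}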
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 508eab346fc6..a48a58e0c61f 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -114,11 +114,11 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
dsp_chan->idx = i % 2;
dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(dsp_chan->ch)) {
- kfree(dsp_chan->name);
ret = PTR_ERR(dsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
chan_name, ret);
+ kfree(dsp_chan->name);
goto out;
}
diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig
index e4a64815f16d..d4e55204c092 100644
--- a/drivers/fpga/tests/Kconfig
+++ b/drivers/fpga/tests/Kconfig
@@ -1,6 +1,6 @@
config FPGA_KUNIT_TESTS
- tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
- depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y
+ bool "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
+ depends on FPGA=y && FPGA_REGION=y && FPGA_BRIDGE=y && KUNIT=y && MODULES=n
default KUNIT_ALL_TESTS
help
This builds unit tests for the FPGA subsystem
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
index 9f9d50ee7871..baab07e3fc59 100644
--- a/drivers/fpga/tests/fpga-region-test.c
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -93,6 +93,8 @@ static void fpga_region_test_class_find(struct kunit *test)
region = fpga_region_class_find(NULL, &ctx->region_pdev->dev, fake_region_match);
KUNIT_EXPECT_PTR_EQ(test, region, ctx->region);
+
+ put_device(&region->dev);
}
/*
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index dbc7ba0ee72c..656d6b1dddb5 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -126,14 +126,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
unsigned long mask = BIT(gpio);
u32 val;
+ vf610_gpio_set(chip, gpio, value);
+
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
- vf610_gpio_set(chip, gpio, value);
-
return pinctrl_gpio_direction_output(chip->base + gpio);
}
@@ -246,7 +246,8 @@ static const struct irq_chip vf610_irqchip = {
.irq_unmask = vf610_gpio_irq_unmask,
.irq_set_type = vf610_gpio_irq_set_type,
.irq_set_wake = vf610_gpio_irq_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND
+ | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index fbda452fb4d6..51e41676de0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -951,6 +951,7 @@ static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
if (!propname)
return ERR_PTR(-EINVAL);
+ memset(&lookup, 0, sizeof(lookup));
lookup.index = index;
ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7d6daf8d2bfa..e036011137aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1103,7 +1103,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
if (unlikely(ret))
goto error;
- ret = drm_exec_lock_obj(&ctx->exec, &bo->tbo.base);
+ ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
drm_exec_retry_on_contention(&ctx->exec);
if (unlikely(ret))
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index efdb1c48f431..d93a8961274c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -65,7 +65,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
}
amdgpu_sync_create(&p->sync);
- drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0dc9c655c4fb..76549c2cffeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
switch (ctx_prio) {
- case AMDGPU_CTX_PRIORITY_UNSET:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_NORMAL:
@@ -55,6 +54,11 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return true;
default:
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ /* UNSET priority is not valid and we don't carry it
+ * around; instead, the only caller of this function,
+ * amdgpu_ctx_ioctl(), maps it to NORMAL.
+ */
return false;
}
}
@@ -64,7 +68,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
- return DRM_SCHED_PRIORITY_UNSET;
+ pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_MIN;
@@ -94,9 +99,6 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
int32_t priority)
{
- if (!amdgpu_ctx_priority_is_valid(priority))
- return -EINVAL;
-
/* NORMAL and below are accessible by everyone */
if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
@@ -631,8 +633,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
return 0;
}
-
-
static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv, uint32_t id,
bool set, u32 *stable_pstate)
@@ -675,8 +675,10 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
id = args->in.ctx_id;
priority = args->in.priority;
- /* For backwards compatibility reasons, we need to accept
- * ioctls with garbage in the priority field */
+ /* For backwards compatibility, we need to accept ioctls with garbage
+ * in the priority field. Garbage values in the priority field result
+ * in the priority being set to NORMAL.
+ */
if (!amdgpu_ctx_priority_is_valid(priority))
priority = AMDGPU_CTX_PRIORITY_NORMAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 12210598e5b8..ba3a87cb88cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -403,7 +403,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
continue;
}
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ /* Reserve fences for two SDMA page table updates */
+ r = dma_resv_reserve_fences(resv, 2);
+ if (!r)
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r)
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f5daadcec865..82f25996ff5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1090,7 +1090,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
+ if (abo->tbo.resource &&
+ abo->tbo.resource->mem_type == TTM_PL_VRAM)
bo = gem_to_amdgpu_bo(gobj);
}
mem = bo->tbo.resource;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 6a8494f98d3e..fe8ba9e9837b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1124,7 +1124,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
bool bL1SS = false;
bool bClkReqSupport = true;
- if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_pcie_dynamic_switching_supported())
return;
if (adev->flags & AMD_IS_APU ||
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index f448b903e190..84148a79414b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -692,7 +692,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
return container_of(bridge, struct ti_sn65dsi86, bridge);
}
-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
{
int val;
struct mipi_dsi_host *host;
@@ -707,7 +707,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
if (!host)
return -EPROBE_DEFER;
- dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
if (IS_ERR(dsi))
return PTR_ERR(dsi);
@@ -725,7 +725,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
pdata->dsi = dsi;
- return devm_mipi_dsi_attach(dev, dsi);
+ return devm_mipi_dsi_attach(&adev->dev, dsi);
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1298,9 +1298,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
struct device_node *np = pdata->dev->of_node;
int ret;
- pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
+ pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
if (IS_ERR(pdata->next_bridge))
- return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+ return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
"failed to create panel bridge\n");
ti_sn_bridge_parse_lanes(pdata, np);
@@ -1319,9 +1319,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
drm_bridge_add(&pdata->bridge);
- ret = ti_sn_attach_host(pdata);
+ ret = ti_sn_attach_host(adev, pdata);
if (ret) {
- dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
+ dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
goto err_remove_bridge;
}
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ed96cfcfa304..8c929ef72c72 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -2574,14 +2574,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
+ if (!mstb)
+ return NULL;
+
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
- if (!port->mstb)
- continue;
-
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)
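The MST fix above moves validation to function entry, where one NULL check covers the external caller and every recursive call, letting the per-port check go. A generic sketch of the pattern under assumed types:

struct node {
	u8 guid[16];
	struct list_head ports;
};

struct port {
	struct list_head next;
	struct node *child;		/* may be NULL, like port->mstb */
};

static struct node *find_by_guid(struct node *n, const u8 *guid)
{
	struct port *p;

	if (!n)				/* one guard handles every call site */
		return NULL;

	if (!memcmp(n->guid, guid, 16))
		return n;

	list_for_each_entry(p, &n->ports, next) {
		struct node *hit = find_by_guid(p->child, guid);

		if (hit)
			return hit;
	}

	return NULL;
}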
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 340da8257b51..4b71040ae5be 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,6 +123,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ /* BenQ GW2765 */
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 1b00ef2c6185..80e4ec6ee403 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2553,8 +2553,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
- XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
lane_pipe_reset);
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index aa4d842d4c5a..a2195e28b625 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
+ case -ENOBUFS: /* temporarily out of fences? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
@@ -915,11 +916,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
{
struct file *file;
- rcu_read_lock();
- file = READ_ONCE(i915->gem.mmap_singleton);
- if (file && !get_file_rcu(file))
- file = NULL;
- rcu_read_unlock();
+ file = get_file_active(&i915->gem.mmap_singleton);
if (file)
return file;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 0b414eae1683..2c0f1f3e28ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -376,9 +376,26 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* driver threads, but also with hardware/firmware agents. A dedicated
* locking register is used.
*/
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+ /*
+ * The steering control and semaphore registers are inside an
+ * "always on" power domain with respect to RC6. However there
+ * are some issues if higher-level platform sleep states are
+ * entering/exiting at the same time these registers are
+ * accessed. Grabbing GT forcewake and holding it over the
+ * entire lock/steer/unlock cycle ensures that those sleep
+ * states have been fully exited before we access these
+ * registers. This wakeref will be released in the unlock
+ * routine.
+ *
+ * This is expected to become a formally documented/numbered
+ * workaround soon.
+ */
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
+
err = wait_for(intel_uncore_read_fw(gt->uncore,
MTL_STEER_SEMAPHORE) == 0x1, 100);
+ }
/*
* Even on platforms with a hardware lock, we'll continue to grab
@@ -415,8 +432,11 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
{
spin_unlock_irqrestore(&gt->mcr_lock, flags);
- if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_GT);
+ }
}
/**
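The net effect of the intel_gt_mcr change is that the forcewake reference brackets the whole steered access on graphics IP 12.70 and later. A hedged usage sketch:

	unsigned long flags;

	intel_gt_mcr_lock(gt, &flags);	/* takes FORCEWAKE_GT on 12.70+ */
	/* steered MCR register reads/writes happen under the semaphore */
	intel_gt_mcr_unlock(gt, flags);	/* drops FORCEWAKE_GT again */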
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 04bc1f4a1115..59e1e21df271 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -482,8 +482,7 @@ static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
{
return !(oa_report_id(stream, report) &
- stream->perf->gen8_valid_ctx_bit) &&
- GRAPHICS_VER(stream->perf->i915) <= 11;
+ stream->perf->gen8_valid_ctx_bit);
}
static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
@@ -5106,6 +5105,7 @@ static void i915_perf_init_info(struct drm_i915_private *i915)
perf->gen8_valid_ctx_bit = BIT(16);
break;
case 12:
+ perf->gen8_valid_ctx_bit = BIT(16);
/*
* Calculate offset at runtime in oa_pin_context for gen12 and
* cache the value in perf->ctx_oactxctrl_offset.
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d35973b41186..7b1076b5e748 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -832,9 +832,18 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = &i915->pmu;
+
+ if (pmu->closed)
+ goto out;
+
if (flags & PERF_EF_UPDATE)
i915_pmu_event_read(event);
i915_pmu_disable(event);
+
+out:
event->hw.state = PERF_HES_STOPPED;
}
diff --git a/drivers/gpu/drm/logicvc/Kconfig b/drivers/gpu/drm/logicvc/Kconfig
index fa7a88368809..1df22a852a23 100644
--- a/drivers/gpu/drm/logicvc/Kconfig
+++ b/drivers/gpu/drm/logicvc/Kconfig
@@ -5,5 +5,7 @@ config DRM_LOGICVC
select DRM_KMS_HELPER
select DRM_KMS_DMA_HELPER
select DRM_GEM_DMA_HELPER
+ select REGMAP
+ select REGMAP_MMIO
help
DRM display driver for the logiCVC programmable logic block from Xylon
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 9f364df52478..0e0a41b2f57f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -239,6 +239,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
npages = obj->size >> PAGE_SHIFT;
mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
if (!mtk_gem->pages) {
+ sg_free_table(sgt);
kfree(sgt);
return -ENOMEM;
}
@@ -248,12 +249,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!mtk_gem->kvaddr) {
+ sg_free_table(sgt);
kfree(sgt);
kfree(mtk_gem->pages);
return -ENOMEM;
}
-out:
+ sg_free_table(sgt);
kfree(sgt);
+
+out:
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index 46b057fe1412..3249e5c1c893 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -62,6 +62,18 @@ nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
return object->client->event(token, &args, sizeof(args.v0));
}
+static bool
+nvkm_connector_is_dp_dms(u8 type)
+{
+ switch (type) {
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
@@ -101,7 +113,7 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
/* TODO: support DP IRQ on ANX9805 and remove this hack. */
- if (!outp->info.location)
+ if (!outp->info.location && !nvkm_connector_is_dp_dms(conn->info.type))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index feb665df35a1..95c8472d878a 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -976,32 +976,6 @@ static const struct panel_desc auo_b116xak01 = {
},
};
-static const struct drm_display_mode auo_b116xw03_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
- .modes = &auo_b116xw03_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .enable = 400,
- },
-};
-
static const struct drm_display_mode auo_b133han05_mode = {
.clock = 142600,
.hdisplay = 1920,
@@ -1726,9 +1700,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b116xa01",
.data = &auo_b116xak01,
}, {
- .compatible = "auo,b116xw03",
- .data = &auo_b116xw03,
- }, {
.compatible = "auo,b133han05",
.data = &auo_b133han05,
}, {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 95959dcc6e0e..dd7928d9570f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -919,6 +919,38 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .prepare = 1,
+ .enable = 200,
+ .disable = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
@@ -4103,6 +4135,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 7726a72befc5..d48b39132b32 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -232,10 +232,6 @@ void ttm_device_fini(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned i;
- man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
- ttm_resource_manager_set_used(man, false);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
@@ -243,6 +239,10 @@ void ttm_device_fini(struct ttm_device *bdev)
drain_workqueue(bdev->wq);
destroy_workqueue(bdev->wq);
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 5a416b39b818..28e2a5fc4528 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -749,6 +749,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
+
+ bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
static int aspeed_i2c_reg_slave(struct i2c_client *client)
@@ -765,7 +767,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client)
__aspeed_i2c_reg_slave(bus, client->addr);
bus->slave = client;
- bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
spin_unlock_irqrestore(&bus->lock, flags);
return 0;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 579b30581725..0d3c9a041b56 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1059,9 +1059,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure PEC */
if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) {
cr1 |= STM32F7_I2C_CR1_PECEN;
- cr2 |= STM32F7_I2C_CR2_PECBYTE;
- if (!f7_msg->read_write)
+ if (!f7_msg->read_write) {
+ cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
+ }
} else {
cr1 &= ~STM32F7_I2C_CR1_PECEN;
cr2 &= ~STM32F7_I2C_CR2_PECBYTE;
@@ -1149,8 +1150,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev)
f7_msg->stop = true;
/* Add one byte for PEC if needed */
- if (cr1 & STM32F7_I2C_CR1_PECEN)
+ if (cr1 & STM32F7_I2C_CR1_PECEN) {
+ cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
+ }
/* Set number of bytes to be transferred */
cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK);
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 22f2280eab7f..9f2e4aa28159 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
if (ret)
goto err;
- adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
+ adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
goto err_with_revert;
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index baccf4bfaf02..8305661e1253 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
- parent = of_find_i2c_adapter_by_node(parent_np);
+ parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index 18236b9fa14a..6ebca7bfd8a2 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
- parent = of_find_i2c_adapter_by_node(parent_np);
+ parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index cff1ba57fb16..43c8af41b4a9 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -826,16 +826,26 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
+ /* leave out any TS related code if unreachable */
+ if (IS_REACHABLE(CONFIG_INPUT)) {
+ has_ts = of_property_read_bool(pdev->dev.of_node,
+ "has-touchscreen") || pdata;
+ }
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->irq = irq;
- irq = platform_get_irq(pdev, 1);
- if (irq == -EPROBE_DEFER)
- return irq;
+ if (has_ts) {
+ irq = platform_get_irq(pdev, 1);
+ if (irq == -EPROBE_DEFER)
+ return irq;
- info->tsirq = irq;
+ info->tsirq = irq;
+ } else {
+ info->tsirq = -1;
+ }
info->dev = &pdev->dev;
@@ -900,12 +910,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
- /* leave out any TS related code if unreachable */
- if (IS_REACHABLE(CONFIG_INPUT)) {
- has_ts = of_property_read_bool(pdev->dev.of_node,
- "has-touchscreen") || pdata;
- }
-
if (pdata)
info->delay = pdata->delay;
else
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index dba73300f894..564c0cad0fc7 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -456,6 +456,9 @@ static const struct xadc_ops xadc_zynq_ops = {
.interrupt_handler = xadc_zynq_interrupt_handler,
.update_alarm = xadc_zynq_update_alarm,
.type = XADC_TYPE_S7,
+ /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
+ .temp_scale = 503975,
+ .temp_offset = 273150,
};
static const unsigned int xadc_axi_reg_offsets[] = {
@@ -566,6 +569,9 @@ static const struct xadc_ops xadc_7s_axi_ops = {
.interrupt_handler = xadc_axi_interrupt_handler,
.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_S7,
+ /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
+ .temp_scale = 503975,
+ .temp_offset = 273150,
};
static const struct xadc_ops xadc_us_axi_ops = {
@@ -577,6 +583,12 @@ static const struct xadc_ops xadc_us_axi_ops = {
.interrupt_handler = xadc_axi_interrupt_handler,
.flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_US,
+ /*
+ * Values below are for UltraScale+ (SYSMONE4) using internal reference.
+ * See https://docs.xilinx.com/v/u/en-US/ug580-ultrascale-sysmon
+ */
+ .temp_scale = 509314,
+ .temp_offset = 280231,
};
static int _xadc_update_adc_reg(struct xadc *xadc, unsigned int reg,
@@ -945,8 +957,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
- /* Temp in C = (val * 503.975) / 2**bits - 273.15 */
- *val = 503975;
+ *val = xadc->ops->temp_scale;
*val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
@@ -954,7 +965,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_OFFSET:
/* Only the temperature channel has an offset */
- *val = -((273150 << bits) / 503975);
+ *val = -((xadc->ops->temp_offset << bits) / xadc->ops->temp_scale);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = xadc_read_samplerate(xadc);
@@ -1423,28 +1434,6 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- /* Disable all alarms */
- ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
- XADC_CONF1_ALARM_MASK);
- if (ret)
- return ret;
-
- /* Set thresholds to min/max */
- for (i = 0; i < 16; i++) {
- /*
- * Set max voltage threshold and both temperature thresholds to
- * 0xffff, min voltage threshold to 0.
- */
- if (i % 8 < 4 || i == 7)
- xadc->threshold[i] = 0xffff;
- else
- xadc->threshold[i] = 0;
- ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
- xadc->threshold[i]);
- if (ret)
- return ret;
- }
-
/* Go to non-buffered mode */
xadc_postdisable(indio_dev);
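As a worked check of the 7-series conversion quoted in the comments (raw code made up for illustration): with 12-bit data, a reading of 0x9e4 gives 2532 * 503975 / 4096 - 273150 = 38389 m°C, about 38.4 °C. A minimal sketch of the same math in fixed point:

static long xadc_code_to_millicelsius(u32 raw, unsigned int bits)
{
	/* Temp[m°C] = raw * 503975 / 2^bits - 273150 (7-series scale) */
	return (long)(((u64)raw * 503975) >> bits) - 273150;
}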
diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h
index 7d78ce698967..3036f4d613ff 100644
--- a/drivers/iio/adc/xilinx-xadc.h
+++ b/drivers/iio/adc/xilinx-xadc.h
@@ -85,6 +85,8 @@ struct xadc_ops {
unsigned int flags;
enum xadc_type type;
+ int temp_scale;
+ int temp_offset;
};
static inline int _xadc_read_adc_reg(struct xadc *xadc, unsigned int reg,
diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c
index 1f280c360701..56e5913ab82d 100644
--- a/drivers/iio/afe/iio-rescale.c
+++ b/drivers/iio/afe/iio-rescale.c
@@ -214,8 +214,18 @@ static int rescale_read_raw(struct iio_dev *indio_dev,
return ret < 0 ? ret : -EOPNOTSUPP;
}
- ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
- return rescale_process_offset(rescale, ret, scale, scale2,
+ if (iio_channel_has_info(rescale->source->channel,
+ IIO_CHAN_INFO_SCALE)) {
+ ret = iio_read_channel_scale(rescale->source, &scale, &scale2);
+ return rescale_process_offset(rescale, ret, scale, scale2,
+ schan_off, val, val2);
+ }
+
+ /*
+ * If we get here, the source channel provides no scale, so scale
+ * 1:1 but still apply the rescaler and offset, if any.
+ */
+ return rescale_process_offset(rescale, IIO_VAL_FRACTIONAL, 1, 1,
schan_off, val, val2);
default:
return -EINVAL;
@@ -280,8 +290,9 @@ static int rescale_configure_channel(struct device *dev,
chan->type = rescale->cfg->type;
if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) &&
- iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) {
- dev_info(dev, "using raw+scale source channel\n");
+ (iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE) ||
+ iio_channel_has_info(schan, IIO_CHAN_INFO_OFFSET))) {
+ dev_info(dev, "using raw+scale/offset source channel\n");
} else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) {
dev_info(dev, "using processed channel\n");
rescale->chan_processed = true;
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index a5ab22cedd41..788fc249234f 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -267,7 +267,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
if (!HFI1_CAP_IS_KSET(SDMA))
return -EINVAL;
- if (!from->user_backed)
+ if (!user_backed_iter(from))
return -EINVAL;
idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 152952127f13..29e4c59aa23b 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2244,7 +2244,7 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
struct qib_user_sdma_queue *pq = fp->pq;
- if (!from->user_backed || !from->nr_segs || !pq)
+ if (!user_backed_iter(from) || !from->nr_segs || !pq)
return -EINVAL;
return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index ed7d4b02f45a..455e966eeff3 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -64,8 +64,8 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_blocks = 0;
- inode->i_atime = inode_set_ctime_current(inode);
- inode->i_mtime = inode->i_atime;
+ simple_inode_init_ts(inode);
+
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3bfc56df4f78..c146378c7d03 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1108,7 +1108,8 @@ map_end:
}
- iommu_flush_iotlb_all(domain);
+ if (!list_empty(&mappings) && iommu_is_dma_domain(domain))
+ iommu_flush_iotlb_all(domain);
out:
iommu_put_resv_regions(dev, &mappings);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 75a2dd550625..a8c89df1a997 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -5112,8 +5112,6 @@ static int __init its_probe_one(struct its_node *its)
}
its->cmd_base = (void *)page_address(page);
its->cmd_write = its->cmd_base;
- its->get_msi_base = its_irq_get_msi_base;
- its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
err = its_alloc_tables(its);
if (err)
@@ -5362,6 +5360,8 @@ static struct its_node __init *its_node_init(struct resource *res,
its->typer = gic_read_typer(its_base + GITS_TYPER);
its->base = its_base;
its->phys_base = res->start;
+ its->get_msi_base = its_irq_get_msi_base;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
its->numa_node = numa_node;
its->fwnode_handle = handle;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 1efd17979f24..b82b89888a5e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -678,7 +678,7 @@ ph_state(struct dchannel *dch)
}
/*
- * disable/enable BChannel for desired protocoll
+ * disable/enable BChannel for desired protocol
*/
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 529c9d04e9a4..b2d10063d35f 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -4,6 +4,7 @@ config BCACHE
tristate "Block device as cache"
select BLOCK_HOLDER_DEPRECATED if SYSFS
select CRC64
+ select CLOSURES
help
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
@@ -19,15 +20,6 @@ config BCACHE_DEBUG
Enables extra debugging tools, allows expensive runtime checks to be
turned on.
-config BCACHE_CLOSURES_DEBUG
- bool "Debug closures"
- depends on BCACHE
- select DEBUG_FS
- help
- Keeps all active closures in a linked list and provides a debugfs
- interface to list them, which makes it possible to see asynchronous
- operations that get stuck.
-
config BCACHE_ASYNC_REGISTRATION
bool "Asynchronous device registration"
depends on BCACHE
diff --git a/drivers/md/bcache/Makefile b/drivers/md/bcache/Makefile
index 5b87e59676b8..054e8a33a7ab 100644
--- a/drivers/md/bcache/Makefile
+++ b/drivers/md/bcache/Makefile
@@ -2,6 +2,6 @@
obj-$(CONFIG_BCACHE) += bcache.o
-bcache-y := alloc.o bset.o btree.o closure.o debug.o extents.o\
- io.o journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
+bcache-y := alloc.o bset.o btree.o debug.o extents.o io.o\
+ journal.o movinggc.o request.o stats.o super.o sysfs.o trace.o\
util.o writeback.o features.o
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 5a79bb3c272f..313cee6ad009 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -179,6 +179,7 @@
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
#include <linux/bio.h>
+#include <linux/closure.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -192,7 +193,6 @@
#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"
-#include "closure.h"
struct bucket {
atomic_t pin;
@@ -299,6 +299,7 @@ struct cached_dev {
struct list_head list;
struct bcache_device disk;
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct cache_sb sb;
struct cache_sb_disk *sb_disk;
@@ -421,6 +422,7 @@ struct cache {
struct kobject kobj;
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct task_struct *alloc_thread;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 0ae2b3676293..8bd899766372 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1368,8 +1368,8 @@ static void cached_dev_free(struct closure *cl)
if (dc->sb_disk)
put_page(virt_to_page(dc->sb_disk));
- if (!IS_ERR_OR_NULL(dc->bdev))
- blkdev_put(dc->bdev, dc);
+ if (dc->bdev_handle)
+ bdev_release(dc->bdev_handle);
wake_up(&unregister_wait);
@@ -1444,7 +1444,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
/* Cached device - bcache superblock */
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct block_device *bdev,
+ struct bdev_handle *bdev_handle,
struct cached_dev *dc)
{
const char *err = "cannot allocate memory";
@@ -1452,14 +1452,15 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
int ret = -ENOMEM;
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
- dc->bdev = bdev;
+ dc->bdev_handle = bdev_handle;
+ dc->bdev = bdev_handle->bdev;
dc->sb_disk = sb_disk;
if (cached_dev_init(dc, sb->block_size << 9))
goto err;
err = "error creating kobject";
- if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
+ if (kobject_add(&dc->disk.kobj, bdev_kobj(dc->bdev), "bcache"))
goto err;
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
@@ -2216,8 +2217,8 @@ void bch_cache_release(struct kobject *kobj)
if (ca->sb_disk)
put_page(virt_to_page(ca->sb_disk));
- if (!IS_ERR_OR_NULL(ca->bdev))
- blkdev_put(ca->bdev, ca);
+ if (ca->bdev_handle)
+ bdev_release(ca->bdev_handle);
kfree(ca);
module_put(THIS_MODULE);
@@ -2337,38 +2338,42 @@ err_free:
}
static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
- struct block_device *bdev, struct cache *ca)
+ struct bdev_handle *bdev_handle,
+ struct cache *ca)
{
const char *err = NULL; /* must be set for any error case */
int ret = 0;
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
- ca->bdev = bdev;
+ ca->bdev_handle = bdev_handle;
+ ca->bdev = bdev_handle->bdev;
ca->sb_disk = sb_disk;
- if (bdev_max_discard_sectors((bdev)))
+ if (bdev_max_discard_sectors((bdev_handle->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
ret = cache_alloc(ca);
if (ret != 0) {
- /*
- * If we failed here, it means ca->kobj is not initialized yet,
- * kobject_put() won't be called and there is no chance to
- * call blkdev_put() to bdev in bch_cache_release(). So we
- * explicitly call blkdev_put() here.
- */
- blkdev_put(bdev, ca);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
else if (ret == -EPERM)
err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
- goto err;
+ pr_notice("error %pg: %s\n", bdev_handle->bdev, err);
+ /*
+ * If we failed here, it means ca->kobj is not initialized yet,
+ * kobject_put() won't be called and there is no chance to
+ * call bdev_release() to bdev in bch_cache_release(). So
+ * we explicitly call bdev_release() here.
+ */
+ bdev_release(bdev_handle);
+ return ret;
}
- if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
- err = "error calling kobject_add";
+ if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
+ pr_notice("error %pg: error calling kobject_add\n",
+ bdev_handle->bdev);
ret = -ENOMEM;
goto out;
}
@@ -2382,15 +2387,10 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %pg\n", ca->bdev);
+ pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
out:
kobject_put(&ca->kobj);
-
-err:
- if (err)
- pr_notice("error %pg: %s\n", ca->bdev, err);
-
return ret;
}
@@ -2445,7 +2445,7 @@ struct async_reg_args {
char *path;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
void *holder;
};
@@ -2456,8 +2456,8 @@ static void register_bdev_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
mutex_lock(&bch_register_lock);
- if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
- < 0)
+ if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
+ args->holder) < 0)
fail = true;
mutex_unlock(&bch_register_lock);
@@ -2477,7 +2477,8 @@ static void register_cache_worker(struct work_struct *work)
container_of(work, struct async_reg_args, reg_work.work);
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
+ if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
+ args->holder))
fail = true;
if (fail)
@@ -2514,7 +2515,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
char *path = NULL;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
- struct block_device *bdev, *bdev2;
+ struct bdev_handle *bdev_handle, *bdev_handle2;
void *holder = NULL;
ssize_t ret;
bool async_registration = false;
@@ -2547,15 +2548,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
ret = -EINVAL;
err = "failed to open device";
- bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
- if (IS_ERR(bdev))
+ bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(bdev_handle))
goto out_free_sb;
err = "failed to set blocksize";
- if (set_blocksize(bdev, 4096))
+ if (set_blocksize(bdev_handle->bdev, 4096))
goto out_blkdev_put;
- err = read_super(sb, bdev, &sb_disk);
+ err = read_super(sb, bdev_handle->bdev, &sb_disk);
if (err)
goto out_blkdev_put;
@@ -2567,13 +2568,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
}
/* Now reopen in exclusive mode with proper holder */
- bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- holder, NULL);
- blkdev_put(bdev, NULL);
- bdev = bdev2;
- if (IS_ERR(bdev)) {
- ret = PTR_ERR(bdev);
- bdev = NULL;
+ bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
+ bdev_release(bdev_handle);
+ bdev_handle = bdev_handle2;
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
+ bdev_handle = NULL;
if (ret == -EBUSY) {
dev_t dev;
@@ -2608,7 +2609,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
args->path = path;
args->sb = sb;
args->sb_disk = sb_disk;
- args->bdev = bdev;
+ args->bdev_handle = bdev_handle;
args->holder = holder;
register_device_async(args);
/* No wait and returns to user space */
@@ -2617,14 +2618,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
mutex_lock(&bch_register_lock);
- ret = register_bdev(sb, sb_disk, bdev, holder);
+ ret = register_bdev(sb, sb_disk, bdev_handle, holder);
mutex_unlock(&bch_register_lock);
/* blkdev_put() will be called in cached_dev_free() */
if (ret < 0)
goto out_free_sb;
} else {
/* blkdev_put() will be called in bch_cache_release() */
- ret = register_cache(sb, sb_disk, bdev, holder);
+ ret = register_cache(sb, sb_disk, bdev_handle, holder);
if (ret)
goto out_free_sb;
}
@@ -2640,8 +2641,8 @@ out_free_holder:
out_put_sb_page:
put_page(virt_to_page(sb_disk));
out_blkdev_put:
- if (bdev)
- blkdev_put(bdev, holder);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
out_free_sb:
kfree(sb);
out_free_path:
@@ -2905,7 +2906,6 @@ static int __init bcache_init(void)
goto err;
bch_debug_init();
- closure_debug_init();
bcache_is_reboot = false;
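
All of the bcache hunks above follow the same open/release conversion. A minimal sketch of the pattern, assuming the bdev_open_by_path()/bdev_release() signatures exactly as they appear in these hunks (error paths trimmed, names illustrative):

static int open_release_sketch(const char *path, void *holder)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				   holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pr_info("opened %pg\n", handle->bdev);	/* raw bdev via handle->bdev */

	bdev_release(handle);	/* pairs with the open; replaces blkdev_put() */
	return 0;
}
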
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 6f3cb7c92130..f61ab1bada6c 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,6 +4,7 @@
#define _BCACHE_UTIL_H
#include <linux/blkdev.h>
+#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
@@ -13,8 +14,6 @@
#include <linux/workqueue.h>
#include <linux/crc64.h>
-#include "closure.h"
-
struct closure;
#ifdef CONFIG_BCACHE_DEBUG
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 64a1f306c96c..f7212e8fc27f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -724,7 +724,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
dev_t dev, blk_mode_t mode)
{
struct table_device *td;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
u64 part_off;
int r;
@@ -733,9 +733,9 @@ static struct table_device *open_table_device(struct mapped_device *md,
return ERR_PTR(-ENOMEM);
refcount_set(&td->count, 1);
- bdev = blkdev_get_by_dev(dev, mode, _dm_claim_ptr, NULL);
- if (IS_ERR(bdev)) {
- r = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
+ if (IS_ERR(bdev_handle)) {
+ r = PTR_ERR(bdev_handle);
goto out_free_td;
}
@@ -745,20 +745,22 @@ static struct table_device *open_table_device(struct mapped_device *md,
* called.
*/
if (md->disk->slave_dir) {
- r = bd_link_disk_holder(bdev, md->disk);
+ r = bd_link_disk_holder(bdev_handle->bdev, md->disk);
if (r)
goto out_blkdev_put;
}
td->dm_dev.mode = mode;
- td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off, NULL, NULL);
+ td->dm_dev.bdev = bdev_handle->bdev;
+ td->dm_dev.bdev_handle = bdev_handle;
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, &part_off,
+ NULL, NULL);
format_dev_t(td->dm_dev.name, dev);
list_add(&td->list, &md->table_devices);
return td;
out_blkdev_put:
- blkdev_put(bdev, _dm_claim_ptr);
+ bdev_release(bdev_handle);
out_free_td:
kfree(td);
return ERR_PTR(r);
@@ -771,7 +773,7 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
{
if (md->disk->slave_dir)
bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
- blkdev_put(td->dm_dev.bdev, _dm_claim_ptr);
+ bdev_release(td->dm_dev.bdev_handle);
put_dax(td->dm_dev.dax_dev);
list_del(&td->list);
kfree(td);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index a104a025084d..839e79e567ee 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2452,8 +2452,7 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
if (test_bit(AutoDetected, &rdev->flags))
md_autodetect_dev(rdev->bdev->bd_dev);
#endif
- blkdev_put(rdev->bdev,
- test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
+ bdev_release(rdev->bdev_handle);
rdev->bdev = NULL;
kobject_put(&rdev->kobj);
}
@@ -3633,7 +3632,6 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
struct md_rdev *rdev;
- struct md_rdev *holder;
sector_t size;
int err;
@@ -3648,21 +3646,16 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
if (err)
goto out_clear_rdev;
- if (super_format == -2) {
- holder = &claim_rdev;
- } else {
- holder = rdev;
- set_bit(Holder, &rdev->flags);
- }
-
- rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- holder, NULL);
- if (IS_ERR(rdev->bdev)) {
+ rdev->bdev_handle = bdev_open_by_dev(newdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ super_format == -2 ? &claim_rdev : rdev, NULL);
+ if (IS_ERR(rdev->bdev_handle)) {
pr_warn("md: could not open device unknown-block(%u,%u).\n",
MAJOR(newdev), MINOR(newdev));
- err = PTR_ERR(rdev->bdev);
+ err = PTR_ERR(rdev->bdev_handle);
goto out_clear_rdev;
}
+ rdev->bdev = rdev->bdev_handle->bdev;
kobject_init(&rdev->kobj, &rdev_ktype);
@@ -3693,7 +3686,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
return rdev;
out_blkdev_put:
- blkdev_put(rdev->bdev, holder);
+ bdev_release(rdev->bdev_handle);
out_clear_rdev:
md_rdev_clear(rdev);
out_free_rdev:
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7c9c13abd7ca..274e7d61d19f 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -59,6 +59,7 @@ struct md_rdev {
*/
struct block_device *meta_bdev;
struct block_device *bdev; /* block device handle */
+ struct bdev_handle *bdev_handle; /* Handle from open for bdev */
struct page *sb_page, *bb_page;
int sb_loaded;
@@ -211,9 +212,6 @@ enum flag_bits {
* check if there is collision between raid1
* serial bios.
*/
- Holder, /* rdev is used as holder while opening
- * underlying disk exclusively.
- */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index a66b7c111cd5..1c6c62a7f7f5 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -958,6 +958,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
if (err)
return err;
+ memset(ctx->buf->virt, 0, pkt_size);
rpra = ctx->buf->virt;
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);
@@ -1090,6 +1091,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
}
}
+ /* Clean up the fdlist, which is updated by the DSP */
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
@@ -1156,11 +1158,9 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (ctx->nscalars) {
- err = fastrpc_get_args(kernel, ctx);
- if (err)
- goto bail;
- }
+ err = fastrpc_get_args(kernel, ctx);
+ if (err)
+ goto bail;
/* make sure that all CPU memory writes are seen by DSP */
dma_wmb();
@@ -1179,20 +1179,18 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (err)
goto bail;
+ /* make sure that all memory writes by DSP are seen by CPU */
+ dma_rmb();
+ /* populate all the output buffers with results */
+ err = fastrpc_put_args(ctx, kernel);
+ if (err)
+ goto bail;
+
/* Check the response from remote dsp */
err = ctx->retval;
if (err)
goto bail;
- if (ctx->nscalars) {
- /* make sure that all memory writes by DSP are seen by CPU */
- dma_rmb();
- /* populate all the output buffers with results */
- err = fastrpc_put_args(ctx, kernel);
- if (err)
- goto bail;
- }
-
bail:
if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
/* We are done with this compute context */
@@ -1983,11 +1981,13 @@ static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_me
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
- fastrpc_map_put(map);
- if (err)
+ if (err) {
dev_err(dev, "unmmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
+ return err;
+ }
+ fastrpc_map_put(map);
- return err;
+ return 0;
}
static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
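
The fastrpc_req_mem_unmap_impl() change above is an ordering fix: the old code called fastrpc_map_put() and then dereferenced map in the error print. A sketch of the rule the reordered tail enforces, with names taken from the hunk (illustrative only):

	if (err) {
		/* map has not been put yet, so map->fd is safe to read */
		dev_err(dev, "unmap failed, fd = %d\n", map->fd);
		return err;
	}
	fastrpc_map_put(map);	/* may drop the final reference and free map */
	return 0;
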
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 5867af9f592c..c44de892a61e 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -139,7 +139,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
if (ret) {
ret->i_ino = get_next_ino();
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
}
return ret;
}
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 2101eb12bcba..7739b783c2db 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -1124,7 +1124,7 @@ static ssize_t ibmvmc_write(struct file *file, const char *buffer,
goto out;
inode = file_inode(file);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b5b414a71e0b..3a8f27c3e310 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -179,6 +179,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
+static int mmc_blk_busy_cb(void *cb_data, bool *busy);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -470,7 +471,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
- bool r1b_resp, use_r1b_resp = false;
+ bool r1b_resp;
unsigned int busy_timeout_ms;
int err;
unsigned int target_part;
@@ -551,8 +552,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
if (r1b_resp)
- use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
- busy_timeout_ms);
+ mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
@@ -605,19 +605,28 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
- /* No need to poll when using HW busy detection. */
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- return 0;
-
if (mmc_host_is_spi(card->host)) {
if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
return mmc_spi_err_check(card);
return err;
}
- /* Ensure RPMB/R1B command has completed by polling with CMD13. */
- if (idata->rpmb || r1b_resp)
- err = mmc_poll_for_busy(card, busy_timeout_ms, false,
- MMC_BUSY_IO);
+
+ /*
+ * Ensure RPMB, writes and R1B responses are completed by polling with
+ * CMD13. Note that we usually don't need to poll when using HW busy
+ * detection, but here it's needed since some commands may indicate an
+ * error through the R1 status bits.
+ */
+ if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
+ struct mmc_blk_busy_data cb_data = {
+ .card = card,
+ };
+
+ err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
+ &mmc_blk_busy_cb, &cb_data);
+
+ idata->ic.response[0] = cb_data.status;
+ }
return err;
}
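
The polling above hands a callback to __mmc_poll_for_busy() so the caller can capture the final R1 status. A sketch of what such a callback looks like, modeled on the mmc_blk_busy_cb() forward declaration added in this hunk; the body here is illustrative, not the driver's exact implementation:

static int busy_cb_sketch(void *cb_data, bool *busy)
{
	struct mmc_blk_busy_data *data = cb_data;
	u32 status = 0;
	int err;

	err = mmc_send_status(data->card, &status);
	if (err)
		return err;

	data->status = status;			/* surfaced as cb_data.status */
	*busy = !mmc_ready_for_data(status);	/* keep polling while busy */
	return 0;
}
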
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 89cd48fcec79..4a4bab9aa726 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
case 3: /* MMC v3.1 - v3.3 */
case 4: /* MMC v4 */
card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
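
The mmc_decode_cid() fix narrows the OEM ID from 16 to 8 bits: CID[111:104] is a single byte, and pulling 16 bits also swallowed the first byte of the product name. A sketch of the extraction UNSTUFF_BITS() performs, paraphrased from its usual definition rather than copied:

static u32 unstuff_bits(const u32 resp[4], int start, int size)
{
	const u32 mask = (size < 32 ? 1u << size : 0) - 1;
	const int off = 3 - (start / 32);	/* resp[] is MSW-first */
	const int shift = start & 31;
	u32 res = resp[off] >> shift;

	if (size + shift > 32)			/* field straddles two words */
		res |= resp[off - 1] << ((32 - shift) % 32);
	return res & mask;
}

/* oemid = unstuff_bits(resp, 104, 8);  -- 8 bits, as in the fix above */
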
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f64b9ac76a5c..5914516df2f7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
err = mmc_sdio_reinit_card(host);
} else if (mmc_card_wake_sdio_irq(host)) {
- /* We may have switched to 1-bit mode during suspend */
+ /*
+ * We may have switched to 1-bit mode during suspend,
+ * so we need to hold retuning, because tuning only supports
+ * 4-bit or 8-bit mode.
+ */
+ mmc_retune_hold_now(host);
err = sdio_enable_4bit_bus(host->card);
+ mmc_retune_release(host);
}
if (err)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5392200cfdf7..97f7c3d4be6e 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -669,11 +669,11 @@ static void msdc_reset_hw(struct msdc_host *host)
u32 val;
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
+ readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- readl_poll_timeout(host->base + MSDC_FIFOCS, val,
- !(val & MSDC_FIFOCS_CLR), 0, 0);
+ readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
+ !(val & MSDC_FIFOCS_CLR), 0, 0);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
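
msdc_reset_hw() can run in atomic context, where the sleeping poll helpers must not be used. A sketch of the distinction, assuming the usual readx_poll_timeout semantics (the _atomic variant busy-waits with udelay() instead of scheduling); base, REG and BUSY_BIT are placeholders:

	u32 val;
	int err;

	/* process context only: may sleep between reads */
	err = readl_poll_timeout(base + REG, val, !(val & BUSY_BIT),
				 10 /* sleep_us */, 1000 /* timeout_us */);

	/* IRQ context or under a spinlock: busy-waits, never sleeps */
	err = readl_poll_timeout_atomic(base + REG, val, !(val & BUSY_BIT),
					1 /* delay_us */, 1000 /* timeout_us */);
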
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index ae8c307b7aa7..109d4b010f97 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -1144,42 +1144,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
-#ifdef CONFIG_PM_SLEEP
-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
-
- pci_free_irq_vectors(slot->chip->pdev);
- gli_pcie_enable_msi(slot);
-
- return sdhci_pci_resume_host(chip);
-}
-
-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = sdhci_pci_gli_resume(chip);
- if (ret)
- return ret;
-
- return cqhci_resume(slot->host->mmc);
-}
-
-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = cqhci_suspend(slot->host->mmc);
- if (ret)
- return ret;
-
- return sdhci_suspend_host(slot->host);
-}
-#endif
-
static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -1420,6 +1384,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+
+ pci_free_irq_vectors(slot->chip->pdev);
+ gli_pcie_enable_msi(slot);
+
+ return sdhci_pci_resume_host(chip);
+}
+
+static int gl9763e_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ ret = sdhci_pci_gli_resume(chip);
+ if (ret)
+ return ret;
+
+ ret = cqhci_resume(slot->host->mmc);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable LPM negotiation to bring device back in sync
+ * with its runtime_pm state.
+ */
+ gl9763e_set_low_power_negotiation(slot, false);
+
+ return 0;
+}
+
+static int gl9763e_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ /*
+ * Certain SoCs can suspend only with the bus in low-power
+ * state, notably x86 SoCs when using S0ix.
+ * Re-enable LPM negotiation to allow entering L1 state
+ * and entering system suspend.
+ */
+ gl9763e_set_low_power_negotiation(slot, true);
+
+ ret = cqhci_suspend(slot->host->mmc);
+ if (ret)
+ goto err_suspend;
+
+ ret = sdhci_suspend_host(slot->host);
+ if (ret)
+ goto err_suspend_host;
+
+ return 0;
+
+err_suspend_host:
+ cqhci_resume(slot->host->mmc);
+err_suspend:
+ gl9763e_set_low_power_negotiation(slot, false);
+ return ret;
+}
+#endif
+
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -1527,8 +1555,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.probe_slot = gli_probe_slot_gl9763e,
.ops = &sdhci_gl9763e_ops,
#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_cqhci_gli_resume,
- .suspend = sdhci_cqhci_gli_suspend,
+ .resume = gl9763e_resume,
+ .suspend = gl9763e_suspend,
#endif
#ifdef CONFIG_PM
.runtime_suspend = gl9763e_runtime_suspend,
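
gl9763e_suspend() above also shows unwind ordering: each step that succeeded is rolled back in reverse on failure, so the controller never stays half-suspended. A generic sketch of the same shape, with hypothetical helpers:

static int suspend_sketch(struct example_dev *dev)
{
	int ret;

	enable_low_power(dev);			/* step 1 */

	ret = quiesce_command_queue(dev);	/* step 2 */
	if (ret)
		goto err_lpm;

	ret = suspend_host(dev);		/* step 3 */
	if (ret)
		goto err_queue;

	return 0;

err_queue:
	resume_command_queue(dev);		/* undo step 2 */
err_lpm:
	disable_low_power(dev);			/* undo step 1 */
	return ret;
}
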
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 649ae075e229..6b84ba27e6ab 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -644,6 +644,7 @@ static int sdhci_sprd_tuning(struct mmc_host *mmc, struct mmc_card *card,
best_clk_sample = sdhci_sprd_get_best_clk_sample(mmc, value);
if (best_clk_sample < 0) {
dev_err(mmc_dev(host->mmc), "all tuning phase fail!\n");
+ err = best_clk_sample;
goto out;
}
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index be106dc20ff3..aa44a23ec045 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -37,7 +37,7 @@
/* Info for the block device */
struct block2mtd_dev {
struct list_head list;
- struct block_device *blkdev;
+ struct bdev_handle *bdev_handle;
struct mtd_info mtd;
struct mutex write_mutex;
};
@@ -55,7 +55,8 @@ static struct page *page_read(struct address_space *mapping, pgoff_t index)
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
- struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct address_space *mapping =
+ dev->bdev_handle->bdev->bd_inode->i_mapping;
struct page *page;
pgoff_t index = to >> PAGE_SHIFT; // page index
int pages = len >> PAGE_SHIFT;
@@ -105,6 +106,8 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
+ struct address_space *mapping =
+ dev->bdev_handle->bdev->bd_inode->i_mapping;
struct page *page;
pgoff_t index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
@@ -117,7 +120,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
cpylen = len; // this page
len = len - cpylen;
- page = page_read(dev->blkdev->bd_inode->i_mapping, index);
+ page = page_read(mapping, index);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -139,7 +142,8 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
loff_t to, size_t len, size_t *retlen)
{
struct page *page;
- struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct address_space *mapping =
+ dev->bdev_handle->bdev->bd_inode->i_mapping;
pgoff_t index = to >> PAGE_SHIFT; // page index
int offset = to & ~PAGE_MASK; // page offset
int cpylen;
@@ -194,7 +198,7 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->blkdev);
+ sync_blockdev(dev->bdev_handle->bdev);
return;
}
@@ -206,10 +210,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
kfree(dev->mtd.name);
- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- blkdev_put(dev->blkdev, NULL);
+ if (dev->bdev_handle) {
+ invalidate_mapping_pages(
+ dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
+ bdev_release(dev->bdev_handle);
}
kfree(dev);
@@ -219,10 +223,10 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
* This function is marked __ref because it calls the __init marked
* early_lookup_bdev when called from the early boot code.
*/
-static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
+static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
- struct block_device *bdev = ERR_PTR(-ENODEV);
+ struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
#ifndef MODULE
int i;
@@ -230,7 +234,7 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
* We can't use early_lookup_bdev from a running system.
*/
if (system_state >= SYSTEM_RUNNING)
- return bdev;
+ return bdev_handle;
/*
* We might not have the root device mounted at this point.
@@ -249,19 +253,20 @@ static struct block_device __ref *mdtblock_early_get_bdev(const char *devname,
wait_for_device_probe();
if (!early_lookup_bdev(devname, &devt)) {
- bdev = blkdev_get_by_dev(devt, mode, dev, NULL);
- if (!IS_ERR(bdev))
+ bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
+ if (!IS_ERR(bdev_handle))
break;
}
}
#endif
- return bdev;
+ return bdev_handle;
}
static struct block2mtd_dev *add_device(char *devname, int erase_size,
char *label, int timeout)
{
const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;
@@ -274,21 +279,23 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
return NULL;
/* Get a handle on the device */
- bdev = blkdev_get_by_path(devname, mode, dev, NULL);
- if (IS_ERR(bdev))
- bdev = mdtblock_early_get_bdev(devname, mode, timeout, dev);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
+ if (IS_ERR(bdev_handle))
+ bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
+ dev);
+ if (IS_ERR(bdev_handle)) {
pr_err("error: cannot open device %s\n", devname);
goto err_free_block2mtd;
}
- dev->blkdev = bdev;
+ dev->bdev_handle = bdev_handle;
+ bdev = bdev_handle->bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
pr_err("attempting to use an MTD device as a block device\n");
goto err_free_block2mtd;
}
- if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
+ if ((long)bdev->bd_inode->i_size % erase_size) {
pr_err("erasesize must be a divisor of device size\n");
goto err_free_block2mtd;
}
@@ -306,7 +313,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size,
dev->mtd.name = name;
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.writebufsize = PAGE_SIZE;
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 78710fbc8e7f..fc8721339282 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -551,6 +551,17 @@ static int physmap_flash_probe(struct platform_device *dev)
if (info->probe_type) {
info->mtds[i] = do_map_probe(info->probe_type,
&info->maps[i]);
+
+ /* Fall back to mapping region as ROM */
+ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+ strcmp(info->probe_type, "map_rom")) {
+ dev_warn(&dev->dev,
+ "map_probe() failed for type %s\n",
+ info->probe_type);
+
+ info->mtds[i] = do_map_probe("map_rom",
+ &info->maps[i]);
+ }
} else {
int j;
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 4621ec549cc7..a492051c46f5 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
dma_addr_t dma_addr;
+ u8 status;
int ret;
struct anfc_op nfc_op = {
.pkt_reg =
@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
}
/* Spare data is not protected */
- if (oob_required)
+ if (oob_required) {
ret = nand_write_oob_std(chip, page);
+ if (ret)
+ return ret;
+ }
- return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
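
The same post-program status check is added to several raw NAND controllers in this series (see the marvell_nand and pl35x hunks below). Factored out, the check the hunks repeat is roughly:

static int check_prog_status(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Ask the chip how the program operation actually went */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}
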
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2c94da7a3b3a..b841a81cb128 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1165,6 +1165,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
int ret;
/* NFCv2 needs more information about the operation being executed */
@@ -1198,7 +1199,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
ret = marvell_nfc_wait_op(chip,
PSEC_TO_MSEC(sdr->tPROG_max));
- return ret;
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
@@ -1627,6 +1639,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int chunk, ret;
+ u8 status;
marvell_nfc_select_target(chip, chip->cur_cs);
@@ -1663,6 +1676,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
if (ret)
return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index d4b55155aeae..1fcac403cee6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -5110,6 +5110,9 @@ static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (!chip->parameters.supports_read_cache)
+ return;
+
if (chip->read_retries)
return;
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 836757717660..b3cc8f360529 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -94,6 +94,9 @@ int nand_jedec_detect(struct nand_chip *chip)
goto free_jedec_param_page;
}
+ if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
memorg->pagesize = le32_to_cpu(p->byte_per_page);
mtd->writesize = memorg->pagesize;
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index f15ef90aec8c..861975e44b55 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -303,6 +303,9 @@ int nand_onfi_detect(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
if (!onfi) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 8da5fee321b5..c506e92a3e45 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -511,6 +511,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
u32 addr1 = 0, addr2 = 0, row;
u32 cmd_addr;
int i, ret;
+ u8 status;
ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
if (ret)
@@ -563,6 +564,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
if (ret)
goto disable_ecc_engine;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
disable_ecc_engine:
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 64499c1b3603..b079605c84d3 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -3444,7 +3444,7 @@ err_nandc_alloc:
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- dma_unmap_resource(dev, res->start, resource_size(res),
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
}
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 50b7295bc922..12601bc4227a 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -12,7 +12,7 @@
#define SPINAND_MFR_MICRON 0x2c
-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
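
The Micron mask fix drops bit 7, which is not part of the ECC status field; only bits 6:4 encode the bitflip count. A compile-time illustration of the two masks:

#include <linux/bits.h>
#include <linux/build_bug.h>

static_assert(GENMASK(7, 4) == 0xF0);	/* old mask: one bit too wide */
static_assert(GENMASK(6, 4) == 0x70);	/* ECC status lives in bits 6:4 */
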
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ed7212e61c54..51d47eda1c87 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4023,7 +4023,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb,
if (likely(n <= hlen))
return data;
else if (skb && likely(pskb_may_pull(skb, n)))
- return skb->head;
+ return skb->data;
return NULL;
}
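
The bonding fix rests on the pskb_may_pull() contract: on success, at least n bytes are linear starting at skb->data. skb->head points at the start of the allocated buffer, usually separated from the packet by headroom, so returning it handed back the wrong bytes. A sketch of the corrected helper shape:

static const void *pull_data_sketch(struct sk_buff *skb, unsigned int n)
{
	if (!pskb_may_pull(skb, n))
		return NULL;	/* not enough data to linearize */
	return skb->data;	/* valid for n bytes; never skb->head */
}
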
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 72374b066f64..cd1f240c90f3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -617,17 +617,16 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
if (!priv->master_mii_bus) {
- of_node_put(dn);
- return -EPROBE_DEFER;
+ err = -EPROBE_DEFER;
+ goto err_of_node_put;
}
- get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
- of_node_put(dn);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_put_master_mii_bus_dev;
}
priv->slave_mii_bus->priv = priv;
@@ -684,11 +683,17 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn) {
- mdiobus_free(priv->slave_mii_bus);
- of_node_put(dn);
- }
+ if (err && dn)
+ goto err_free_slave_mii_bus;
+ return 0;
+
+err_free_slave_mii_bus:
+ mdiobus_free(priv->slave_mii_bus);
+err_put_master_mii_bus_dev:
+ put_device(&priv->master_mii_bus->dev);
+err_of_node_put:
+ of_node_put(dn);
return err;
}
@@ -696,6 +701,7 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
mdiobus_free(priv->slave_mii_bus);
+ put_device(&priv->master_mii_bus->dev);
of_node_put(priv->master_mii_dn);
}
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index ca66b747b7c5..d7c274af6d4d 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
u32 header_len = ADIN1110_RD_HEADER_LEN;
- struct spi_transfer t;
+ struct spi_transfer t = {0};
u32 frame_size_no_fcs;
struct sk_buff *rxb;
u32 frame_size;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4d4140b7c450..f3305c434c95 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2170,7 +2170,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
- .of_match_table = of_match_ptr(xgene_enet_of_match),
+ .of_match_table = xgene_enet_of_match,
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
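
of_match_ptr() compiles to NULL when CONFIG_OF is disabled, which can leave xgene_enet_of_match defined but unreferenced on ACPI-only builds (an unused-variable warning); referencing the table directly avoids that. The wrapper's definition, for reference:

#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL
#endif
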
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8d719f82854a..76de55306c4d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
+ if (ret)
+ return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 5fc64e47568a..d567e42e1760 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev,
struct sock *sk, long *timeo_p)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int err = 0;
+ int ret, err = 0;
long current_timeo;
long vm_wait = 0;
bool noblock;
@@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, sk->sk_err ||
- (sk->sk_shutdown & SEND_SHUTDOWN) ||
- (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+ ret = sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (csk_mem_free(cdev, sk) && !vm_wait),
+ &wait);
sk->sk_write_pending--;
+ if (ret < 0)
+ goto do_error;
if (vm_wait) {
vm_wait -= current_timeo;
@@ -1348,6 +1351,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied = 0;
int target;
long timeo;
+ int ret;
buffers_freed = 0;
@@ -1423,7 +1427,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (copied >= target)
break;
chtls_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ copied = copied ? : ret;
+ goto unlock;
+ }
continue;
found_ok_skb:
if (!skb->len) {
@@ -1518,6 +1526,8 @@ skip_copy:
if (buffers_freed)
chtls_cleanup_rbuf(sk, copied);
+
+unlock:
release_sock(sk);
return copied;
}
@@ -1534,6 +1544,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
int copied = 0;
size_t avail; /* amount of available data in current skb */
long timeo;
+ int ret;
lock_sock(sk);
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
@@ -1585,7 +1596,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
release_sock(sk);
lock_sock(sk);
} else {
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ /* here 'copied' is 0 due to previous checks */
+ copied = ret;
+ break;
+ }
}
if (unlikely(peek_seq != tp->copied_seq)) {
@@ -1656,6 +1672,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int copied = 0;
long timeo;
int target; /* Read at least this many bytes */
+ int ret;
buffers_freed = 0;
@@ -1747,7 +1764,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (copied >= target)
break;
chtls_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0) {
+ copied = copied ? : ret;
+ goto unlock;
+ }
continue;
found_ok_skb:
@@ -1816,6 +1837,7 @@ skip_copy:
if (buffers_freed)
chtls_cleanup_rbuf(sk, copied);
+unlock:
release_sock(sk);
return copied;
}
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index d1da7413dc4d..e84a066aa1a4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -146,7 +146,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
if (err)
- goto alloc_err;
+ goto alloc_err_rda;
}
if (!rx->data.raw_addressing) {
@@ -171,12 +171,26 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return slots;
alloc_err_qpl:
+ /* Fully free the copy pool pages. */
while (j--) {
page_ref_sub(rx->qpl_copy_pool[j].page,
rx->qpl_copy_pool[j].pagecnt_bias - 1);
put_page(rx->qpl_copy_pool[j].page);
}
-alloc_err:
+
+ /* Do not fully free QPL pages - only remove the bias added in this
+ * function with gve_setup_rx_buffer.
+ */
+ while (i--)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
+
+ gve_unassign_qpl(priv, rx->data.qpl->id);
+ rx->data.qpl = NULL;
+
+ return err;
+
+alloc_err_rda:
while (i--)
gve_rx_free_buffer(&priv->pdev->dev,
&rx->data.page_info[i],
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 6e310a539467..55bb0b5310d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -580,7 +580,6 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
-#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -603,6 +602,7 @@ struct i40e_pf {
* in abilities field of i40e_aq_set_phy_config structure
*/
#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index eeef20f77106..1b493854f522 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1082,7 +1082,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
I40E_PFLAN_QALLOC_LASTQ_SHIFT;
- if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+ if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
num_queues = (j - base_queue) + 1;
else
num_queues = 0;
@@ -1092,7 +1092,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
- if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+ if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
num_vfs = (j - i) + 1;
else
num_vfs = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0b3a27f118fb..b047c587629b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2544,7 +2544,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
rx_buffer = i40e_rx_bi(rx_ring, ntp);
i40e_inc_ntp(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
- cleaned_count++;
+ /* Update ntc and bump cleaned count if not in the
+ * middle of a multi-buffer packet.
+ */
+ if (rx_ring->next_to_clean == ntp) {
+ rx_ring->next_to_clean =
+ rx_ring->next_to_process;
+ cleaned_count++;
+ }
continue;
}
@@ -2847,7 +2854,7 @@ tx_only:
return budget;
}
- if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Exit the polling mode, but don't re-enable interrupts if stack might
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 37f41c8a682f..7d991e4d9b89 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -437,12 +437,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean;
- u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
struct xdp_buff *first = NULL;
+ u32 count = rx_ring->count;
struct bpf_prog *xdp_prog;
+ u32 entries_to_alloc;
bool failure = false;
- u16 cleaned_count;
if (next_to_process != next_to_clean)
first = *i40e_rx_bi(rx_ring, next_to_clean);
@@ -475,7 +475,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
qword);
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi);
- next_to_process = (next_to_process + 1) & count_mask;
+ if (++next_to_process == count)
+ next_to_process = 0;
continue;
}
@@ -493,7 +494,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
break;
- next_to_process = (next_to_process + 1) & count_mask;
+ if (++next_to_process == count)
+ next_to_process = 0;
if (i40e_is_non_eop(rx_ring, rx_desc))
continue;
@@ -513,10 +515,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
rx_ring->next_to_clean = next_to_clean;
rx_ring->next_to_process = next_to_process;
- cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -752,14 +754,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
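
Both i40e_xsk hunks replace the `& (count - 1)` wrap with a conditional, because the mask trick is only correct when the ring size is a power of two. Side by side:

	/* only correct when count is a power of two */
	ntc = (ntc + 1) & (count - 1);

	/* correct for any ring size */
	if (++ntc == count)
		ntc = 0;
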
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 6a2e6d64bc3a..b3434dbc90d6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1437,9 +1437,9 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
if (!list_empty(&adapter->adv_rss_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
+ adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@@ -4982,8 +4982,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->finish_config, iavf_finish_config);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
@@ -4994,6 +4992,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ /* Initialization goes on in the work. Do not add more of it below. */
return 0;
err_ioremap:
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 7bf9b7069754..73bbf06a76db 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1201,8 +1201,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
static void
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index c8286adae946..7784135160fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <generated/utsrelease.h>
+#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
@@ -4683,6 +4684,9 @@ static void ice_init_features(struct ice_pf *pf)
static void ice_deinit_features(struct ice_pf *pf)
{
+ if (ice_is_safe_mode(pf))
+ return;
+
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -5014,6 +5018,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return -EINVAL;
}
+ /* When running under a kdump kernel, initiate a reset before enabling the
+ * device in order to clear out any pending DMA transactions. These
+ * transactions can cause some systems to machine check when doing
+ * the pcim_enable_device() below.
+ */
+ if (is_kdump_kernel()) {
+ pci_save_state(pdev);
+ pci_clear_master(pdev);
+ err = pcie_flr(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+ }
+
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 319ed601eaa1..4ee849985e2b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if (err)
goto err_out_w_lock;
- igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ if (err)
+ goto err_out_input_filter;
spin_unlock(&adapter->nfc_lock);
return 0;
+err_out_input_filter:
+ igb_erase_filter(adapter, input);
err_out_w_lock:
spin_unlock(&adapter->nfc_lock);
err_out:
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 7ab6dd58e400..dd8a9d27a167 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1817,7 +1817,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- u32 advertising;
+ u16 advertised = 0;
/* When adapter in resetting mode, autoneg/speed/duplex
* cannot be changed
@@ -1842,18 +1842,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
- * We have to check this and convert it to ADVERTISE_2500_FULL
- * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
- */
- if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
- advertising |= ADVERTISE_2500_FULL;
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 2500baseT_Full))
+ advertised |= ADVERTISE_2500_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 1000baseT_Full))
+ advertised |= ADVERTISE_1000_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Full))
+ advertised |= ADVERTISE_100_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Half))
+ advertised |= ADVERTISE_100_HALF;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Full))
+ advertised |= ADVERTISE_10_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Half))
+ advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = advertising;
+ hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index dbc518ff8276..5b46ca47c8e5 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -715,20 +715,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
hw_desc->dptr = tx_buffer->sglist_dma;
}
- /* Flush the hw descriptor before writing to doorbell */
- wmb();
-
- /* Ring Doorbell to notify the NIC there is a new packet */
- writel(1, iq->doorbell_reg);
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ skb_tx_timestamp(skb);
atomic_inc(&iq->instr_pending);
wi++;
if (wi == iq->max_count)
wi = 0;
iq->host_write_index = wi;
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
- netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
iq->stats.instr_posted++;
- skb_tx_timestamp(skb);
return NETDEV_TX_OK;
dma_map_sg_err:
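
The octep reordering ensures every descriptor and bookkeeping write is visible before the doorbell: the writel() to the doorbell is what licenses the NIC to DMA the descriptor, so it must come after the wmb(). A sketch of the required ordering, with hypothetical helpers:

	fill_tx_descriptor(hw_desc, skb);	/* normal memory writes */
	advance_write_index(iq);

	wmb();		/* order the writes above before the MMIO below */

	writel(1, iq->doorbell_reg);	/* NIC may now fetch the descriptor */
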
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index afb348579577..c22b0ad0c870 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -2186,52 +2186,23 @@ static u16 cmdif_rev(struct mlx5_core_dev *dev)
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
- int size = sizeof(struct mlx5_cmd_prot_block);
- int align = roundup_pow_of_two(size);
struct mlx5_cmd *cmd = &dev->cmd;
- u32 cmd_l;
- int err;
-
- cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool)
- return -ENOMEM;
- err = alloc_cmd_page(dev, cmd);
- if (err)
- goto err_free_pool;
-
- cmd_l = (u32)(cmd->dma);
- if (cmd_l & 0xfff) {
- mlx5_core_err(dev, "invalid command queue address\n");
- err = -ENOMEM;
- goto err_cmd_page;
- }
cmd->checksum_disabled = 1;
spin_lock_init(&cmd->alloc_lock);
spin_lock_init(&cmd->token_lock);
- create_msg_cache(dev);
-
set_wqname(dev);
cmd->wq = create_singlethread_workqueue(cmd->wq_name);
if (!cmd->wq) {
mlx5_core_err(dev, "failed to create command workqueue\n");
- err = -ENOMEM;
- goto err_cache;
+ return -ENOMEM;
}
mlx5_cmdif_debugfs_init(dev);
return 0;
-
-err_cache:
- destroy_msg_cache(dev);
-err_cmd_page:
- free_cmd_page(dev, cmd);
-err_free_pool:
- dma_pool_destroy(cmd->pool);
- return err;
}
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
@@ -2240,15 +2211,15 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
mlx5_cmdif_debugfs_cleanup(dev);
destroy_workqueue(cmd->wq);
- destroy_msg_cache(dev);
- free_cmd_page(dev, cmd);
- dma_pool_destroy(cmd->pool);
}
int mlx5_cmd_enable(struct mlx5_core_dev *dev)
{
+ int size = sizeof(struct mlx5_cmd_prot_block);
+ int align = roundup_pow_of_two(size);
struct mlx5_cmd *cmd = &dev->cmd;
u32 cmd_h, cmd_l;
+ int err;
memset(&cmd->vars, 0, sizeof(cmd->vars));
cmd->vars.cmdif_rev = cmdif_rev(dev);
@@ -2281,10 +2252,21 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
sema_init(&cmd->vars.pages_sem, 1);
sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
+ cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
+ if (!cmd->pool)
+ return -ENOMEM;
+
+ err = alloc_cmd_page(dev, cmd);
+ if (err)
+ goto err_free_pool;
+
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
- if (WARN_ON(cmd_l & 0xfff))
- return -EINVAL;
+ if (cmd_l & 0xfff) {
+ mlx5_core_err(dev, "invalid command queue address\n");
+ err = -ENOMEM;
+ goto err_cmd_page;
+ }
iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
@@ -2297,17 +2279,27 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
cmd->mode = CMD_MODE_POLLING;
cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
+ create_msg_cache(dev);
create_debugfs_files(dev);
return 0;
+
+err_cmd_page:
+ free_cmd_page(dev, cmd);
+err_free_pool:
+ dma_pool_destroy(cmd->pool);
+ return err;
}
void mlx5_cmd_disable(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
- clean_debug_files(dev);
flush_workqueue(cmd->wq);
+ clean_debug_files(dev);
+ destroy_msg_cache(dev);
+ free_cmd_page(dev, cmd);
+ dma_pool_destroy(cmd->pool);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
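The mlx5 hunks above move the command-queue allocations from mlx5_cmd_init() to mlx5_cmd_enable() and unwind them in reverse order on failure. A reduced sketch of the goto-unwind idiom the new error path uses (the cmd_sketch struct and names are placeholders):

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct cmd_sketch {
	struct dma_pool *pool;
	unsigned long page;
};

static int cmd_enable_sketch(struct device *dev, struct cmd_sketch *c)
{
	int err;

	c->pool = dma_pool_create("cmd_sketch", dev, 64, 64, 0);
	if (!c->pool)
		return -ENOMEM;

	c->page = __get_free_page(GFP_KERNEL);
	if (!c->page) {
		err = -ENOMEM;
		goto err_free_pool;	/* undo in reverse order */
	}
	return 0;

err_free_pool:
	dma_pool_destroy(c->pool);
	return err;
}

static void cmd_disable_sketch(struct cmd_sketch *c)
{
	free_page(c->page);
	dma_pool_destroy(c->pool);
}

Pairing the teardown with the enable path, as the patch does for mlx5_cmd_disable(), keeps every resource owned by exactly one enable/disable pair.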
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 7c0f2adbea00..ad789349c06e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -848,7 +848,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
- tracer->owner = false;
+ mlx5_fw_tracer_ownership_acquire(tracer);
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0fef853eab62..5d128c5b4529 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -467,6 +467,17 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
/* only handle the event on peers */
if (mlx5_esw_bridge_is_local(dev, rep, esw))
break;
+
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+ /* Mark for deletion to prevent the update wq task from
+ * spuriously refreshing the entry which would mark it again as
+ * offloaded in SW bridge. After this, fall through to the regular
+ * async delete code.
+ */
+ mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
+ fdb_info);
fallthrough;
case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 1730f6a716ee..b10e40e1a9c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -24,7 +24,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
- if (!route_dev || !netif_is_ovs_master(route_dev))
+ if (!route_dev || !netif_is_ovs_master(route_dev) ||
+ attr->parse_attr->filter_dev == e->out_dev)
goto out;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 12f56d0db0af..8bed17d8fe56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -874,11 +874,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
out:
- if (flags & XDP_XMIT_FLUSH) {
- if (sq->mpwqe.wqe)
- mlx5e_xdp_mpwqe_complete(sq);
+ if (sq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(sq);
+
+ if (flags & XDP_XMIT_FLUSH)
mlx5e_xmit_xdp_doorbell(sq);
- }
return nxmit;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2fdb8895aecd..fd1cce542b68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -701,7 +701,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
/* update HW stats in background for next time */
mlx5e_queue_update_stats(priv);
- memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+ mlx5e_stats_copy_rep_stats(stats, &priv->stats.rep_stats);
}
static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
@@ -769,6 +769,7 @@ static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
static void mlx5e_build_rep_params(struct net_device *netdev)
{
+ const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
@@ -794,8 +795,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* RQ */
mlx5e_build_rq_params(mdev, params);
+ /* If netdev is already registered (e.g. move from nic profile to uplink),
+ * RTNL lock must be held before triggering netdev notifiers.
+ */
+ if (take_rtnl)
+ rtnl_lock();
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
+ if (take_rtnl)
+ rtnl_unlock();
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3fd11b0761e0..8d9743a5e42c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -457,26 +457,41 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
{
int remaining = wqe_bulk;
- int i = 0;
+ int total_alloc = 0;
+ int refill_alloc;
+ int refill;
/* The WQE bulk is split into smaller bulks that are sized
* according to the page pool cache refill size to avoid overflowing
* the page pool cache due to too many page releases at once.
*/
do {
- int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
- int alloc_count;
+ refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
- mlx5e_free_rx_wqes(rq, ix + i, refill);
- alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
- i += alloc_count;
- if (unlikely(alloc_count != refill))
- break;
+ mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
+ refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
+ if (unlikely(refill_alloc != refill))
+ goto err_free;
+ total_alloc += refill_alloc;
remaining -= refill;
} while (remaining);
- return i;
+ return total_alloc;
+
+err_free:
+ mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
+
+ for (int i = 0; i < total_alloc + refill; i++) {
+ int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
+ struct mlx5e_wqe_frag_info *frag;
+
+ frag = get_frag(rq, j);
+ for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
+ frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+ }
+
+ return 0;
}
static void
@@ -816,6 +831,8 @@ err_unmap:
mlx5e_page_release_fragmented(rq, frag_page);
}
+ bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
+
err:
rq->stats->buff_alloc_err++;
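The refill rework above allocates in page-pool-sized chunks and, on a partial allocation, rolls the whole bulk back (marking the fragments skip-release) instead of returning a short count. A simplified sketch of that control flow; alloc_entries()/free_entries() are stand-ins for the driver's WQE helpers:

#include <linux/minmax.h>

int alloc_entries(int start, int count);	/* stand-in: returns count allocated */
void free_entries(int start, int count);	/* stand-in */

/* Returns entries refilled, or 0 after rolling back on failure. */
static int refill_in_chunks(int start, int bulk, int unit)
{
	int remaining = bulk;
	int total = 0;

	while (remaining) {
		int chunk = min(unit, remaining);
		int done = alloc_entries(start + total, chunk);

		if (done != chunk) {
			/* partial chunk: release everything done so far */
			free_entries(start, total + done);
			return 0;
		}
		total += done;
		remaining -= chunk;
	}
	return total;
}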
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 176fa5976259..477c547dcc04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -484,11 +484,20 @@ struct mlx5e_stats {
struct mlx5e_vnic_env_stats vnic;
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
- struct rtnl_link_stats64 vf_vport;
struct mlx5e_pcie_stats pcie;
struct mlx5e_rep_stats rep_stats;
};
+static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport,
+ struct mlx5e_rep_stats *rep_stats)
+{
+ memset(vf_vport, 0, sizeof(*vf_vport));
+ vf_vport->rx_packets = rep_stats->vport_rx_packets;
+ vf_vport->tx_packets = rep_stats->vport_tx_packets;
+ vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
+ vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+}
+
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c24828b688ac..c8590483ddc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -4972,7 +4972,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+ mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats,
+ &priv->stats.rep_stats);
break;
default:
NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
@@ -5012,7 +5013,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
u64 dbytes;
u64 dpkts;
- cur_stats = priv->stats.vf_vport;
+ mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
rpriv->prev_vf_vport_stats = cur_stats;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index e36294b7ade2..1b9bc32efd6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1748,6 +1748,28 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
entry->lastuse = jiffies;
}
+void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_esw_bridge *bridge;
+
+ bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+ if (!bridge)
+ return;
+
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
+ if (!entry) {
+ esw_debug(br_offloads->esw->dev,
+ "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ fdb_info->addr, fdb_info->vid, vport_num);
+ return;
+ }
+
+ entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
+}
+
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info)
@@ -1810,7 +1832,8 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
unsigned long lastuse =
(unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
- if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+ if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
+ MLX5_ESW_BRIDGE_FLAG_DELETED))
continue;
if (time_after(lastuse, entry->lastuse))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index c2c7c70d99eb..d6f539161993 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -62,6 +62,9 @@ int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_nu
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+ struct mlx5_esw_bridge_offloads *br_offloads,
+ struct switchdev_notifier_fdb_info *fdb_info);
void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct switchdev_notifier_fdb_info *fdb_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 4911cc32161b..7c251af566c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -133,6 +133,7 @@ struct mlx5_esw_bridge_mdb_key {
enum {
MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1),
+ MLX5_ESW_BRIDGE_FLAG_DELETED = BIT(2),
};
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d4cde6555063..8d0b915a3121 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1038,11 +1038,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
return ERR_PTR(err);
}
-static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
{
- MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
- mlx5_eq_notifier_register(esw->dev, &esw->nb);
-
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
ESW_FUNCTIONS_CHANGED);
@@ -1050,13 +1047,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
}
}
-static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
{
if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
- mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
-
flush_workqueue(esw->work_queue);
}
@@ -1483,6 +1478,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
+ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
if (esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
@@ -1495,7 +1493,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
- mlx5_eswitch_event_handlers_register(esw);
+ mlx5_eswitch_event_handler_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1622,7 +1620,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
*/
mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
- mlx5_eswitch_event_handlers_unregister(esw);
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+ mlx5_eswitch_event_handler_unregister(esw);
esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 717a0b3f89bd..ab5ef254a748 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt,
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
u8 **data, dma_addr_t *phys_addr)
{
- *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+ size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ *data = kmalloc(size, GFP_ATOMIC);
if (!(*data)) {
DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
return -ENOMEM;
@@ -2589,7 +2592,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
INIT_LIST_HEAD(&cdev->ll2->list);
spin_lock_init(&cdev->ll2->lock);
- cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+ cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
L1_CACHE_BYTES + params->mtu;
/* Allocate memory for LL2.
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 6351a2dc13bc..361b90007148 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
unsigned int entry = dirty_tx % NUM_TX_DESC;
u32 status;
- status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;
@@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* If skb is NULL then we come here again once a tx irq is
* triggered after the last fragment is marked transmitted.
*/
- if (tp->cur_tx != dirty_tx && skb)
+ if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
rtl8169_doorbell(tp);
}
}
@@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
dma_addr_t addr;
u32 status;
- status = le32_to_cpu(desc->opts1);
+ status = le32_to_cpu(READ_ONCE(desc->opts1));
if (status & DescOwn)
break;
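The r8169 hunks wrap loads of fields the NIC updates via DMA in READ_ONCE(), so the compiler emits exactly one untorn load rather than re-reading memory that may change underneath it. A minimal sketch of the ownership check, with an illustrative descriptor layout:

#include <asm/byteorder.h>
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>

struct desc_sketch {
	__le32 opts1;	/* DescOwn-style bit, written by the device */
};

#define DESC_OWN_SKETCH BIT(31)

static bool desc_completed(const struct desc_sketch *d)
{
	/* One untorn load; safe against concurrent DMA updates. */
	u32 status = le32_to_cpu(READ_ONCE(d->opts1));

	return !(status & DESC_OWN_SKETCH);
}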
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 834f000ba1c4..30ebef88248d 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -629,14 +629,14 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
}
if (child_ip_tos_mask != old->child_ip_tos_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
+ "Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x",
child_ip_tos_mask,
old->child_ip_tos_mask);
return -EEXIST;
}
if (child_udp_sport_mask != old->child_udp_sport_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
+ "Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x",
child_udp_sport_mask,
old->child_udp_sport_mask);
return -EEXIST;
@@ -1081,7 +1081,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement ttl twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@@ -1106,7 +1106,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement hoplimit twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@@ -1120,7 +1120,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
}
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: ttl add action type %x %x %x/%x",
+ "ttl add action type %x %x %x/%x is not supported",
fa->mangle.htype, fa->mangle.offset,
fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
@@ -1164,7 +1164,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 0:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth.dst32 mangle",
+ "mask (%#x) of eth.dst32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1184,7 +1184,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->dst_mac_16 = 1;
} else {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+ "mask (%#x) of eth+4 mangle is not high or low 16b",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1192,7 +1192,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 8:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth.src32 mangle",
+ "mask (%#x) of eth.src32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1201,7 +1201,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->src_mac_32 = 1;
return efx_tc_complete_mac_mangle(efx, act, mung, extack);
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported",
fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1217,7 +1217,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+ "mask (%#x) out of range, only support mangle action on ipv4.ttl",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1227,7 +1227,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+ "only support mangle ttl when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@@ -1237,7 +1237,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: we cannot decrement ttl past 0");
+ "decrement ttl past 0 is not supported");
return -EOPNOTSUPP;
}
@@ -1245,7 +1245,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: multiple dec ttl");
+ "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@@ -1259,7 +1259,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle on the ttl field (offset is %u)",
+ "only support mangle on the ttl field (offset is %u)",
fa->mangle.offset);
return -EOPNOTSUPP;
}
@@ -1275,7 +1275,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+ "mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
fa->mangle.mask);
return -EOPNOTSUPP;
@@ -1286,7 +1286,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+ "only support hop_limit when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@@ -1296,7 +1296,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: we cannot decrement hop_limit past 0");
+ "decrementing hop_limit past 0 is not supported");
return -EOPNOTSUPP;
}
@@ -1304,7 +1304,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: multiple dec ttl");
+ "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@@ -1318,7 +1318,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle on the hop_limit field");
+ "only support mangle on the hop_limit field");
return -EOPNOTSUPP;
}
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ed1a5a31a491..5801f4d50f95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1197,6 +1197,17 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
+static void stmmac_set_half_duplex(struct stmmac_priv *priv)
+{
+ /* Half-Duplex can only work with single tx queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->phylink_config.mac_capabilities |=
+ (MAC_10HD | MAC_100HD | MAC_1000HD);
+}
+
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1228,10 +1239,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
MAC_10FD | MAC_100FD |
MAC_1000FD;
- /* Half-Duplex can only work with single queue */
- if (priv->plat->tx_queues_to_use <= 1)
- priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
- MAC_1000HD;
+ stmmac_set_half_duplex(priv);
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
@@ -7172,6 +7180,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
+ stmmac_set_half_duplex(priv);
stmmac_napi_add(dev);
if (netif_running(dev))
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0a3346650e03..cac61f5d3fd4 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -90,12 +90,16 @@ config TI_CPTS
The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
driver offers a PTP Hardware Clock.
+config TI_K3_CPPI_DESC_POOL
+ tristate
+
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
help
@@ -187,6 +191,7 @@ config TI_ICSSG_PRUETH
tristate "TI Gigabit PRU Ethernet driver"
select PHYLIB
select TI_ICSS_IEP
+ select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
help
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 34fd7a716ba6..67bed861f31d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -24,14 +24,15 @@ keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
+obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o
+
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
-icssg-prueth-y := k3-cppi-desc-pool.o \
- icssg/icssg_prueth.o \
+icssg-prueth-y := icssg/icssg_prueth.o \
icssg/icssg_classifier.o \
icssg/icssg_queues.o \
icssg/icssg_config.o \
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 933b84666574..b272361e378f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -379,9 +379,9 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
- {{0xffff0004, 0xffff0100, 0xffff0100, EMAC_NONE}}, /* EMAC_PORT_DISABLE */
+ {{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}}, /* EMAC_PORT_DISABLE */
{{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}}, /* EMAC_PORT_BLOCK */
- {{0xffbb0000, 0xfcff0000, 0xdcff0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */
+ {{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}}, /* EMAC_PORT_FORWARD */
{{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}}, /* EMAC_PORT_FORWARD_WO_LEARNING */
{{0xffff0001, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT ALL */
{{0xfffe0002, EMAC_NONE, EMAC_NONE, EMAC_NONE}}, /* ACCEPT TAGGED */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index bb0b33927e3b..3dbadddd7e35 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -9,6 +9,9 @@
#include "icssg_stats.h"
#include <linux/regmap.h>
+#define ICSSG_TX_PACKET_OFFSET 0xA0
+#define ICSSG_TX_BYTE_OFFSET 0xEC
+
static u32 stats_base[] = { 0x54c, /* Slice 0 stats start */
0xb18, /* Slice 1 stats start */
};
@@ -18,6 +21,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
u32 base = stats_base[slice];
+ u32 tx_pkt_cnt = 0;
u32 val;
int i;
@@ -29,7 +33,12 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
base + icssg_all_stats[i].offset,
val);
+ if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
+ tx_pkt_cnt = val;
+
emac->stats[i] += val;
+ if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
+ emac->stats[i] -= tx_pkt_cnt * 8;
}
}
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index 38cc12f9f133..05cc7aab1ec8 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -39,6 +39,7 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
gen_pool_destroy(pool->gen_pool); /* frees pool->name */
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy);
struct k3_cppi_desc_pool *
k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
@@ -98,29 +99,38 @@ gen_pool_create_fail:
devm_kfree(pool->dev, pool);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name);
dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool,
void *addr)
{
return addr ? pool->dma_addr + (addr - pool->cpumem) : 0;
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_virt2dma);
void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma)
{
return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL;
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_dma2virt);
void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool)
{
return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size);
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_alloc);
void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr)
{
gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size);
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_free);
size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
{
return gen_pool_avail(pool->gen_pool) / pool->desc_size;
}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API");
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index dc14a66583ff..44488c153ea2 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
key_index = wl->current_key;
if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
- /* reques to change default key index */
+ /* request to change default key index */
pr_debug("%s: request to change default key to %d\n",
__func__, key_index);
wl->current_key = key_index;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 144ec626230d..b22596b18ee8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -872,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(iph->tot_len)) {
+ if (iph->frag_off & htons(IP_DF) &&
+ ((!skb_is_gso(skb) && skb->len > mtu) ||
+ (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
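The gtp fix makes the DF check GSO-aware: a GSO skb legitimately exceeds the MTU before segmentation, so skb->len alone is the wrong test. The new condition, restated as a predicate using the same in-kernel helpers the patch uses:

#include <linux/skbuff.h>
#include <net/ip.h>

static bool gtp_needs_frag(const struct sk_buff *skb,
			   const struct iphdr *iph, u32 mtu)
{
	if (!(iph->frag_off & htons(IP_DF)))
		return false;	/* fragmentation allowed, no ICMP needed */
	if (skb_is_gso(skb))
		/* check the post-segmentation size, not skb->len */
		return !skb_gso_validate_network_len(skb, mtu);
	return skb->len > mtu;
}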
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index a03490ba2e5b..cc7ddc40020f 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
static void adf7242_debugfs_init(struct adf7242_local *lp)
{
- char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
+ char debugfs_dir_name[DNAME_INLINE_LEN + 1];
- strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+ snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
+ "adf7242-%s", dev_name(&lp->spi->dev));
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
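The adf7242 change replaces strncat() into a prefixed buffer, where the bound must account for the prefix already present, with one bounded snprintf(). The general shape (buffer size is illustrative):

#include <linux/kernel.h>

#define DIR_NAME_LEN 40

static void build_debugfs_name(char *buf, const char *dev_name)
{
	/* snprintf() bounds the total output and always NUL-terminates;
	 * strncat(buf, dev_name, n) would need n reduced by the prefix
	 * length to avoid overflowing buf.
	 */
	snprintf(buf, DIR_NAME_LEN, "adf7242-%s", dev_name);
}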
diff --git a/drivers/net/mdio/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index a881e3523328..bef4cce71287 100644
--- a/drivers/net/mdio/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
@@ -55,6 +55,27 @@ out:
return r;
}
+static int mdio_mux_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct mdio_mux_child_bus *cb = bus->priv;
+ struct mdio_mux_parent_bus *pb = cb->parent;
+ int r;
+
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
+ r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+ if (r)
+ goto out;
+
+ pb->current_child = cb->bus_number;
+
+ r = pb->mii_bus->read_c45(pb->mii_bus, phy_id, dev_addr, regnum);
+out:
+ mutex_unlock(&pb->mii_bus->mdio_lock);
+
+ return r;
+}
+
/*
* The parent bus' lock is used to order access to the switch_fn.
*/
@@ -80,6 +101,28 @@ out:
return r;
}
+static int mdio_mux_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 val)
+{
+ struct mdio_mux_child_bus *cb = bus->priv;
+ struct mdio_mux_parent_bus *pb = cb->parent;
+ int r;
+
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
+ r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+ if (r)
+ goto out;
+
+ pb->current_child = cb->bus_number;
+
+ r = pb->mii_bus->write_c45(pb->mii_bus, phy_id, dev_addr, regnum, val);
+out:
+ mutex_unlock(&pb->mii_bus->mdio_lock);
+
+ return r;
+}
+
static int parent_count;
static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
@@ -173,6 +216,10 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->parent = dev;
cb->mii_bus->read = mdio_mux_read;
cb->mii_bus->write = mdio_mux_write;
+ if (parent_bus->read_c45)
+ cb->mii_bus->read_c45 = mdio_mux_read_c45;
+ if (parent_bus->write_c45)
+ cb->mii_bus->write_c45 = mdio_mux_write_c45;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
mdiobus_free(cb->mii_bus);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 8478b081c058..97638ba7ae85 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -894,6 +894,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.name = _name, \
/* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
+ .get_sset_count = bcm_phy_get_sset_count, \
+ .get_strings = bcm_phy_get_strings, \
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
.config_init = bcm7xxx_16nm_ephy_config_init, \
.config_aneg = genphy_config_aneg, \
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 89ab9efe522c..afa5497f7c35 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -3073,10 +3073,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct net *net = sock_net(&tfile->sk);
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
- unsigned int ifindex, carrier;
+ unsigned int carrier;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
+ int ifindex;
int sndbuf;
int vnet_hdr_sz;
int le;
@@ -3132,7 +3133,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
goto unlock;
-
+ ret = -EINVAL;
+ if (ifindex < 0)
+ goto unlock;
ret = 0;
tfile->ifindex = ifindex;
goto unlock;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0c13d9950cd8..afb20c0ed688 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -764,7 +764,7 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
- RTL8152_UNPLUG = 0,
+ RTL8152_INACCESSIBLE = 0,
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
@@ -773,6 +773,9 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
RX_EPROTO,
+ IN_PRE_RESET,
+ PROBED_WITH_NO_ERRORS,
+ PROBE_SHOULD_RETRY,
};
#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
@@ -953,6 +956,8 @@ struct r8152 {
u8 version;
u8 duplex;
u8 autoneg;
+
+ unsigned int reg_access_reset_count;
};
/**
@@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384;
#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc))
+/* If register access fails then we block access and issue a reset. If this
+ * happens too many times in a row without a successful access then we stop
+ * trying to reset and just leave access blocked.
+ */
+#define REGISTER_ACCESS_MAX_RESETS 3
+
+static void rtl_set_inaccessible(struct r8152 *tp)
+{
+ set_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+}
+
+static void rtl_set_accessible(struct r8152 *tp)
+{
+ clear_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+}
+
+static
+int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index, void *data,
+ __u16 size, const char *msg_tag)
+{
+ struct usb_device *udev = tp->udev;
+ int ret;
+
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ ret = usb_control_msg(udev, pipe, request, requesttype,
+ value, index, data, size,
+ USB_CTRL_GET_TIMEOUT);
+
+ /* No need to issue a reset to report an error if the USB device got
+ * unplugged; just return immediately.
+ */
+ if (ret == -ENODEV)
+ return ret;
+
+ /* If the write was successful then we're done */
+ if (ret >= 0) {
+ tp->reg_access_reset_count = 0;
+ return ret;
+ }
+
+ dev_err(&udev->dev,
+ "Failed to %s %d bytes at %#06x/%#06x (%d)\n",
+ msg_tag, size, value, index, ret);
+
+ /* Block all future register access until we reset. Much of the code
+ * in the driver doesn't check for errors. Notably, many parts of the
+ * driver do a read/modify/write of a register value without
+ * confirming that the read succeeded. Writing back modified garbage
+ * like this can fully wedge the adapter, requiring a power cycle.
+ */
+ rtl_set_inaccessible(tp);
+
+ /* If probe hasn't yet finished, then we'll request a retry of the
+ * whole probe routine if we get any control transfer errors. We
+ * never have to clear this bit since we free/reallocate the whole "tp"
+ * structure if we retry probe.
+ */
+ if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) {
+ set_bit(PROBE_SHOULD_RETRY, &tp->flags);
+ return ret;
+ }
+
+ /* Failing to access registers in pre-reset is not surprising since we
+ * wouldn't be resetting if things were behaving normally. The register
+ * access we do in pre-reset isn't truly mandatory--we're just reusing
+ * the disable() function and trying to be nice by powering the
+ * adapter down before resetting it. Thus, if we're in pre-reset,
+ * we'll return right away and not try to queue up yet another reset.
+ * We know the post-reset is already coming.
+ */
+ if (test_bit(IN_PRE_RESET, &tp->flags))
+ return ret;
+
+ if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
+ usb_queue_reset_device(tp->intf);
+ tp->reg_access_reset_count++;
+ } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
+ dev_err(&udev->dev,
+ "Tried to reset %d times; giving up.\n",
+ REGISTER_ACCESS_MAX_RESETS);
+ }
+
+ return ret;
+}
+
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
@@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
- RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- value, index, tmp, size, 500);
+ ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ value, index, tmp, size, "read");
+
if (ret < 0)
memset(data, 0xff, size);
else
@@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
- RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
- value, index, tmp, size, 500);
+ ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
+ RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+ value, index, tmp, size, "write");
kfree(tmp);
@@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static void rtl_set_unplug(struct r8152 *tp)
{
- if (tp->udev->state == USB_STATE_NOTATTACHED) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
- smp_mb__after_atomic();
- }
+ if (tp->udev->state == USB_STATE_NOTATTACHED)
+ rtl_set_inaccessible(tp);
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
u16 limit = 64;
int ret = 0;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and index must be 4 bytes aligned */
@@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 byteen_start, byteen_end, byen;
u16 limit = 512;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and index must be 4 bytes aligned */
@@ -1537,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
struct r8152 *tp = netdev_priv(netdev);
int ret;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
if (phy_id != R8152_PHY_ID)
@@ -1553,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
{
struct r8152 *tp = netdev_priv(netdev);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (phy_id != R8152_PHY_ID)
@@ -1758,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb)
if (!tp)
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1850,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!skb_queue_empty(&tp->tx_queue))
@@ -1871,7 +1965,7 @@ static void intr_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
switch (status) {
@@ -2615,7 +2709,7 @@ static void bottom_half(struct tasklet_struct *t)
{
struct r8152 *tp = from_tasklet(tp, t, tx_tl);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -2658,7 +2752,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
int ret;
/* The rx would be stopped, so skip submitting */
- if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
!test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
return 0;
@@ -3058,7 +3152,7 @@ static int rtl_enable(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -3145,7 +3239,7 @@ static int rtl8153_enable(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -3177,7 +3271,7 @@ static void rtl_disable(struct r8152 *tp)
u32 ocp_data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -3631,7 +3725,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
}
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -3663,6 +3757,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@@ -3703,6 +3799,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@@ -4046,6 +4144,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
for (i = 0; wait && i < 5000; i++) {
u32 ocp_data;
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
usleep_range(1000, 2000);
ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
if ((ocp_data & PATCH_READY) ^ check)
@@ -6002,7 +6103,7 @@ static int rtl8156_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
r8156_fc_parameter(tp);
@@ -6060,7 +6161,7 @@ static int rtl8156b_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -6246,7 +6347,7 @@ out:
static void rtl8152_up(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8152_aldps_en(tp, false);
@@ -6256,7 +6357,7 @@ static void rtl8152_up(struct r8152 *tp)
static void rtl8152_down(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6271,7 +6372,7 @@ static void rtl8153_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@@ -6311,7 +6412,7 @@ static void rtl8153_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6332,7 +6433,7 @@ static void rtl8153b_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6356,7 +6457,7 @@ static void rtl8153b_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6393,7 +6494,7 @@ static void rtl8153c_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6474,7 +6575,7 @@ static void rtl8156_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6547,7 +6648,7 @@ static void rtl8156_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6685,7 +6786,7 @@ static void rtl_work_func_t(struct work_struct *work)
/* If the device is unplugged or !netif_running(), the workqueue
* doesn't need to wake the device, and could return directly.
*/
- if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6724,7 +6825,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6851,7 +6952,7 @@ static int rtl8152_close(struct net_device *netdev)
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
- if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
rtl_stop_rx(tp);
} else {
@@ -6884,7 +6985,7 @@ static void r8152b_init(struct r8152 *tp)
u32 ocp_data;
u16 data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
data = r8152_mdio_read(tp, MII_BMCR);
@@ -6928,7 +7029,7 @@ static void r8153_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@@ -6939,7 +7040,7 @@ static void r8153_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -7068,7 +7169,7 @@ static void r8153b_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -7079,7 +7180,7 @@ static void r8153b_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -7150,7 +7251,7 @@ static void r8153c_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -7170,7 +7271,7 @@ static void r8153c_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -7999,7 +8100,7 @@ static void r8156_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8020,7 +8121,7 @@ static void r8156_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8095,7 +8196,7 @@ static void r8156b_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8129,7 +8230,7 @@ static void r8156b_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8255,7 +8356,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
- if (!tp)
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
netdev = tp->netdev;
@@ -8270,7 +8371,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
+ set_bit(IN_PRE_RESET, &tp->flags);
tp->rtl_ops.disable(tp);
+ clear_bit(IN_PRE_RESET, &tp->flags);
mutex_unlock(&tp->control);
}
@@ -8283,9 +8386,11 @@ static int rtl8152_post_reset(struct usb_interface *intf)
struct net_device *netdev;
struct sockaddr sa;
- if (!tp)
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
+ rtl_set_accessible(tp);
+
/* reset the MAC address in case of policy change */
if (determine_ethernet_addr(tp, &sa) >= 0) {
rtnl_lock();
@@ -9158,7 +9263,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *data = if_mii(rq);
int res;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
res = usb_autopm_get_interface(tp->intf);
@@ -9260,7 +9365,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
static void rtl8152_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (tp->version != RTL_VER_01)
@@ -9269,7 +9374,7 @@ static void rtl8152_unload(struct r8152 *tp)
static void rtl8153_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_power_cut_en(tp, false);
@@ -9277,7 +9382,7 @@ static void rtl8153_unload(struct r8152 *tp)
static void rtl8153b_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_power_cut_en(tp, false);
@@ -9487,16 +9592,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev)
__le32 *tmp;
u8 version;
int ret;
+ int i;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return 0;
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret > 0)
- ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ /* Retry up to 3 times in case there is a transitory error. We do this
+ * since retrying a read of the version is always safe and this
+ * function doesn't take advantage of r8152_control_msg().
+ */
+ for (i = 0; i < 3; i++) {
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
+ USB_CTRL_GET_TIMEOUT);
+ if (ret > 0) {
+ ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ break;
+ }
+ }
+
+ if (i != 0 && ret > 0)
+ dev_warn(&udev->dev, "Needed %d retries to read version\n", i);
kfree(tmp);
@@ -9595,25 +9713,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
return 0;
}
-static int rtl8152_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int rtl8152_probe_once(struct usb_interface *intf,
+ const struct usb_device_id *id, u8 version)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
- u8 version;
int ret;
- if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
- return -ENODEV;
-
- if (!rtl_check_vendor_ok(intf))
- return -ENODEV;
-
- version = rtl8152_get_version(intf);
- if (version == RTL_VER_UNKNOWN)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -9776,18 +9883,68 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
+ /* If we saw a control transfer error while probing then we may
+ * want to try probe() again. Consider this an error.
+ */
+ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+ goto out2;
+
+ set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
+out2:
+ unregister_netdev(netdev);
+
out1:
tasklet_kill(&tp->tx_tl);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ if (tp->rtl_ops.unload)
+ tp->rtl_ops.unload(tp);
+ rtl8152_release_firmware(tp);
usb_set_intfdata(intf, NULL);
out:
+ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+ ret = -EAGAIN;
+
free_netdev(netdev);
return ret;
}
+#define RTL8152_PROBE_TRIES 3
+
+static int rtl8152_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ u8 version;
+ int ret;
+ int i;
+
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+ return -ENODEV;
+
+ if (!rtl_check_vendor_ok(intf))
+ return -ENODEV;
+
+ version = rtl8152_get_version(intf);
+ if (version == RTL_VER_UNKNOWN)
+ return -ENODEV;
+
+ for (i = 0; i < RTL8152_PROBE_TRIES; i++) {
+ ret = rtl8152_probe_once(intf, id, version);
+ if (ret != -EAGAIN)
+ break;
+ }
+ if (ret == -EAGAIN) {
+ dev_err(&intf->dev,
+ "r8152 failed probe after %d tries; giving up\n", i);
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
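The probe rework above splits the body into rtl8152_probe_once() and retries it when a transient control-transfer error set PROBE_SHOULD_RETRY, surfaced as -EAGAIN. A stripped-down sketch of the retry wrapper; probe_once() stands in for the real probe body:

#include <linux/errno.h>

#define PROBE_TRIES 3

int probe_once(void);	/* stand-in: -EAGAIN means a retry is worthwhile */

static int probe_with_retries(void)
{
	int ret;
	int i;

	for (i = 0; i < PROBE_TRIES; i++) {
		ret = probe_once();
		if (ret != -EAGAIN)	/* success or a hard error */
			break;
	}
	/* Out of retries: report a hard failure, never -EAGAIN. */
	return ret == -EAGAIN ? -ENODEV : ret;
}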
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 563ecd27b93e..a530f20ee257 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (ret < 0) {
+ if (ret < 4) {
+ ret = ret < 0 ? ret : -ENODATA;
+
if (ret != -ENODEV)
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
@@ -897,7 +899,7 @@ static int smsc95xx_reset(struct usbnet *dev)
if (timeout >= 100) {
netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
- return ret;
+ return -ETIMEDOUT;
}
ret = smsc95xx_set_mac_address(dev);
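The smsc95xx fix treats a short USB control read as an error: a 4-byte register read that transfers fewer bytes must not be reported as success with a garbage value. A sketch of the checked read, with usb_read4() as a stand-in transfer helper:

#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/types.h>

int usb_read4(u32 index, __le32 *buf);	/* stand-in: bytes read or -errno */

static int read_reg_checked(u32 index, u32 *val)
{
	__le32 buf;
	int ret = usb_read4(index, &buf);

	if (ret < 4)	/* error or short read */
		return ret < 0 ? ret : -ENODATA;

	*val = le32_to_cpu(buf);
	return 0;
}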
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 635301d677e1..829515a601b3 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -4,7 +4,6 @@
*/
#include <linux/delay.h>
-#include <linux/pm_runtime.h>
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
@@ -632,11 +631,6 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
/* Complete all memory stores after setting bit */
smp_mb__after_atomic();
- if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
- pm_runtime_mark_last_busy(ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_imem->dev);
- }
-
return;
err_ipc_mux_deinit:
@@ -1240,7 +1234,6 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
/* forward MDM_NOT_READY to listeners */
ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
- pm_runtime_get_sync(ipc_imem->dev);
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->tdupdate_timer);
@@ -1426,16 +1419,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
}
-
- if (!pm_runtime_enabled(ipc_imem->dev))
- pm_runtime_enable(ipc_imem->dev);
-
- pm_runtime_set_autosuspend_delay(ipc_imem->dev,
- IPC_MEM_AUTO_SUSPEND_DELAY_MS);
- pm_runtime_use_autosuspend(ipc_imem->dev);
- pm_runtime_allow(ipc_imem->dev);
- pm_runtime_mark_last_busy(ipc_imem->dev);
-
return ipc_imem;
devlink_channel_fail:
ipc_devlink_deinit(ipc_imem->ipc_devlink);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 0144b45e2afb..5664ac507c90 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -103,8 +103,6 @@ struct ipc_chnl_cfg;
#define FULLY_FUNCTIONAL 0
#define IOSM_DEVLINK_INIT 1
-#define IPC_MEM_AUTO_SUSPEND_DELAY_MS 5000
-
/* List of the supported UL/DL pipes. */
enum ipc_mem_pipes {
IPC_MEM_PIPE_0 = 0,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
index 3a259c9abefd..04517bd3325a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -6,7 +6,6 @@
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <net/rtnetlink.h>
#include "iosm_ipc_imem.h"
@@ -438,8 +437,7 @@ static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
return 0;
}
-static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
- ipc_pcie_resume_cb, NULL);
+static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
static struct pci_driver iosm_ipc_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
index 2ba1ddca3945..5d5b4183e14a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_port.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -3,8 +3,6 @@
* Copyright (C) 2020-21 Intel Corporation.
*/
-#include <linux/pm_runtime.h>
-
#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
@@ -15,16 +13,12 @@ static int ipc_port_ctrl_start(struct wwan_port *port)
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
int ret = 0;
- pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
ipc_port->chl_id,
IPC_HP_CDEV_OPEN);
if (!ipc_port->channel)
ret = -EIO;
- pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
-
return ret;
}
@@ -33,24 +27,15 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
- pm_runtime_get_sync(ipc_port->ipc_imem->dev);
ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
- pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
}
/* transfer control data to modem */
static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
{
struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
- int ret;
- pm_runtime_get_sync(ipc_port->ipc_imem->dev);
- ret = ipc_imem_sys_cdev_write(ipc_port, skb);
- pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
-
- return ret;
+ return ipc_imem_sys_cdev_write(ipc_port, skb);
}
static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
index 4368373797b6..eeecfa3d10c5 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_trace.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -3,9 +3,7 @@
* Copyright (C) 2020-2021 Intel Corporation.
*/
-#include <linux/pm_runtime.h>
#include <linux/wwan.h>
-
#include "iosm_ipc_trace.h"
/* sub buffer size and number of sub buffer */
@@ -99,8 +97,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
if (ret)
return ret;
- pm_runtime_get_sync(ipc_trace->ipc_imem->dev);
-
mutex_lock(&ipc_trace->trc_mutex);
if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
@@ -121,10 +117,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
ret = count;
unlock:
mutex_unlock(&ipc_trace->trc_mutex);
-
- pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev);
-
return ret;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index 93d17de08786..ff747fc79aaf 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -6,7 +6,6 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
-#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>
@@ -52,13 +51,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
int if_id = priv->if_id;
- int ret = 0;
if (if_id < IP_MUX_SESSION_START ||
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
- pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
/* get channel id */
priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
@@ -66,8 +63,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_err(ipc_wwan->dev,
"cannot connect wwan0 & id %d to the IPC mem layer",
if_id);
- ret = -ENODEV;
- goto err_out;
+ return -ENODEV;
}
/* enable tx path, DL data may follow */
@@ -76,11 +72,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
priv->ch_id, priv->if_id);
-err_out:
- pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
-
- return ret;
+ return 0;
}
/* Bring-down the wwan net link */
@@ -90,12 +82,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
netif_stop_queue(netdev);
- pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
priv->ch_id);
priv->ch_id = -1;
- pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
return 0;
}
@@ -117,7 +106,6 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
return -EINVAL;
- pm_runtime_get(ipc_wwan->ipc_imem->dev);
/* Send the SKB to device for transmission */
ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
if_id, priv->ch_id, skb);
@@ -131,14 +119,9 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
ret = NETDEV_TX_BUSY;
dev_err(ipc_wwan->dev, "unable to push packets");
} else {
- pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
goto exit;
}
- pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
- pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
-
return ret;
exit:
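The iosm hunks above back out the driver's runtime-PM plumbing wholesale. For reference, a kernel-style sketch of the autosuspend bracket being removed (assumes <linux/pm_runtime.h> and a device with autosuspend already configured; perform_transfer() is a hypothetical I/O helper):

static int do_io(struct device *dev)
{
        int ret;

        pm_runtime_get_sync(dev);        /* resume (or pin) the device */
        ret = perform_transfer(dev);     /* hypothetical I/O helper */
        pm_runtime_mark_last_busy(dev);  /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev); /* drop the ref; idle later */
        return ret;
}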
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index daf5d144a8ea..064592a5d546 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvmf_auth_dhchap_success1_data *data = chap->buf;
size_t size = sizeof(*data);
- if (chap->ctrl_key)
+ if (chap->s2)
size += chap->hash_len;
if (size > CHAP_BUF_SIZE) {
@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
goto fail2;
}
- if (chap->ctrl_key) {
+ if (chap->s2) {
/* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d8ff796fd5f2..747c879e8982 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
if (!buf)
goto out;
- ret = -EFAULT;
- if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
- goto out_free_meta;
+ if (req_op(req) == REQ_OP_DRV_OUT) {
+ ret = -EFAULT;
+ if (copy_from_user(buf, ubuf, len))
+ goto out_free_meta;
+ } else {
+ memset(buf, 0, len);
+ }
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
if (IS_ERR(bip)) {
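The hunk above closes an information leak: for the device-to-host direction the metadata bounce buffer used to reach the device (and then user space) uninitialized. A standalone sketch of the fix's shape, with hypothetical names standing in for the kernel helpers:

#include <stdlib.h>
#include <string.h>

enum dir { DRV_OUT, DRV_IN };

static void *alloc_meta(enum dir d, const void *user, size_t len)
{
        void *buf = malloc(len);

        if (!buf)
                return NULL;
        if (d == DRV_OUT)
                memcpy(buf, user, len);  /* stands in for copy_from_user() */
        else
                memset(buf, 0, len);     /* never hand out stale heap bytes */
        return buf;
}

int main(void)
{
        char user[16] = "payload";

        free(alloc_meta(DRV_IN, user, sizeof(user)));
        return 0;
}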
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 347cb5daebc3..3f0c9ee09a12 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES |
- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 337a624a537c..a7fea4cbacd7 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
+ if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
__nvme_rdma_stop_queue(queue);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 586458f765f1..1d9854484e2e 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -333,19 +333,21 @@ done:
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
mod_delayed_work(system_wq, &req->sq->auth_expired_work,
auth_expire_secs * HZ);
- return;
+ goto complete;
}
/* Final states, clear up variables */
nvmet_auth_sq_free(req->sq);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+ nvmet_req_complete(req, status);
}
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
kfree(d);
done:
req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
+
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl);
}
+ nvmet_req_complete(req, status);
}
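Both hunks above apply the same ordering rule: nvmet_req_complete() hands the request back, so per-queue auth state must be torn down before completion, not after. A hedged pseudo-kernel sketch of the rule (all names hypothetical):

static void finish(struct my_req *r, int status)
{
        teardown_auth_state(r);          /* we still own r here */
        complete_request(r, status);     /* after this, r may be freed */
}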
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 468833675cc9..f11400a908f2 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -50,9 +50,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
- if (ns->bdev) {
- blkdev_put(ns->bdev, NULL);
+ if (ns->bdev_handle) {
+ bdev_release(ns->bdev_handle);
ns->bdev = NULL;
+ ns->bdev_handle = NULL;
}
}
@@ -84,17 +85,18 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
if (ns->buffered_io)
return -ENOTBLK;
- ns->bdev = blkdev_get_by_path(ns->device_path,
- BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(ns->bdev)) {
- ret = PTR_ERR(ns->bdev);
+ ns->bdev_handle = bdev_open_by_path(ns->device_path,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, NULL, NULL);
+ if (IS_ERR(ns->bdev_handle)) {
+ ret = PTR_ERR(ns->bdev_handle);
if (ret != -ENOTBLK) {
- pr_err("failed to open block device %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->bdev));
+ pr_err("failed to open block device %s: (%d)\n",
+ ns->device_path, ret);
}
- ns->bdev = NULL;
+ ns->bdev_handle = NULL;
return ret;
}
+ ns->bdev = ns->bdev_handle->bdev;
ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 8cfd60f3b564..360e385be33b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -58,6 +58,7 @@
struct nvmet_ns {
struct percpu_ref ref;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
struct file *file;
bool readonly;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cd92d7ddf5ed..197fc2ecb164 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
if (status == -EPIPE || status == -ECONNRESET)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else
@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0)
- goto free_crypto;
+ return ret; /* queue removal will clean up */
queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue);
return 0;
-free_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvmet_tcp_free_crypto(queue);
- return ret;
}
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index a223d9537f22..e8b6f194925d 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -498,7 +498,7 @@ static const struct ocotp_params imx6sl_params = {
};
static const struct ocotp_params imx6sll_params = {
- .nregs = 128,
+ .nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
@@ -512,14 +512,14 @@ static const struct ocotp_params imx6sx_params = {
};
static const struct ocotp_params imx6ul_params = {
- .nregs = 128,
+ .nregs = 144,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ull_params = {
- .nregs = 64,
+ .nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 1d567604b650..376d023a0aa9 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
- int error;
if (!ddata->enabled)
return -ENODEV;
- error = pinctrl_pm_select_default_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with default_state: %i\n",
- __func__, error);
-
gpiod_set_value_cansleep(enable_gpio, 1);
/* Allow aggressive PM for USB, it's only needed for n_gsm port */
@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x)
gpiod_set_value_cansleep(enable_gpio, 0);
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
return 0;
}
@@ -456,6 +445,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
{
struct gpio_desc *reset_gpio =
ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ int error;
ddata->enabled = false;
phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
@@ -471,6 +461,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
} else {
dev_err(ddata->dev, "Timed out powering down\n");
}
+
+ /*
+ * Keep reset gpio high with padconf internal pull-up resistor to
+ * prevent modem from waking up during deeper SoC idle states. The
+ * gpio bank lines can have glitches if not in the always-on wkup
+ * domain.
+ */
+ error = pinctrl_pm_select_sleep_state(ddata->dev);
+ if (error)
+ dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+ __func__, error);
}
static void phy_mdm6600_deferred_power_on(struct work_struct *work)
@@ -571,12 +572,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- /* Active state selected in phy_mdm6600_power_on() */
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
error = phy_mdm6600_init_lines(ddata);
if (error)
return error;
@@ -627,10 +622,12 @@ idle:
pm_runtime_put_autosuspend(ddata->dev);
cleanup:
- if (error < 0)
+ if (error < 0) {
phy_mdm6600_device_power_off(ddata);
- pm_runtime_disable(ddata->dev);
- pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ }
+
return error;
}
@@ -639,6 +636,7 @@ static void phy_mdm6600_remove(struct platform_device *pdev)
struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ pm_runtime_get_noresume(ddata->dev);
pm_runtime_dont_use_autosuspend(ddata->dev);
pm_runtime_put_sync(ddata->dev);
pm_runtime_disable(ddata->dev);
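The added pm_runtime_get_noresume() above keeps the usage count balanced on the remove path. A kernel-style sketch of that teardown order (assumes probe enabled runtime PM with autosuspend):

static void my_remove(struct device *dev)
{
        pm_runtime_get_noresume(dev);           /* take a ref, no resume */
        pm_runtime_dont_use_autosuspend(dev);   /* undo use_autosuspend() */
        pm_runtime_put_sync(dev);               /* drop the ref taken above */
        pm_runtime_disable(dev);                /* matches pm_runtime_enable() */
}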
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 8814f4322adf..3642a5d4f2f3 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -152,7 +152,7 @@ static int qcom_apq8064_sata_phy_init(struct phy *generic_phy)
return ret;
}
- /* SATA phy calibrated succesfully, power up to functional mode */
+ /* SATA phy calibrated successfully, power up to functional mode */
writel_relaxed(0x3E, base + SATA_PHY_POW_DWN_CTRL1);
writel_relaxed(0x01, base + SATA_PHY_RX_IMCAL0);
writel_relaxed(0x01, base + SATA_PHY_TX_IMCAL0);
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index ed08072ca032..5cb7e79b99b3 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -82,7 +82,7 @@ struct m31_priv_data {
unsigned int nregs;
};
-struct m31_phy_regs m31_ipq5332_regs[] = {
+static struct m31_phy_regs m31_ipq5332_regs[] = {
{
USB_PHY_CFG0,
UTMI_PHY_OVERRIDE_EN,
@@ -172,8 +172,7 @@ static int m31usb_phy_init(struct phy *phy)
ret = clk_prepare_enable(qphy->clk);
if (ret) {
- if (qphy->vreg)
- regulator_disable(qphy->vreg);
+ regulator_disable(qphy->vreg);
dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
return ret;
}
@@ -256,7 +255,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index cbb28afce135..5e6fc8103e9d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -859,10 +859,10 @@ static const struct qmp_phy_init_tbl sm8550_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG1, 0x4b),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
};
static const struct qmp_phy_init_tbl sm8550_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
@@ -2555,6 +2555,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
void __iomem *tx2 = qmp->tx2;
void __iomem *rx2 = qmp->rx2;
void __iomem *pcs = qmp->pcs;
+ void __iomem *pcs_usb = qmp->pcs_usb;
void __iomem *status;
unsigned int val;
int ret;
@@ -2576,6 +2577,9 @@ static int qmp_combo_usb_power_on(struct phy *phy)
qmp_combo_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (pcs_usb)
+ qmp_combo_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
index 9510e63ba9d8..c38530d6776b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
@@ -12,7 +12,7 @@
#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3 0xcc
#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6 0xd8
#define QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
-#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x90
+#define QPHY_USB_V6_PCS_POWER_STATE_CONFIG1 0x90
#define QPHY_USB_V6_PCS_RX_SIGDET_LVL 0x188
#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
@@ -23,6 +23,7 @@
#define QPHY_USB_V6_PCS_EQ_CONFIG1 0x1dc
#define QPHY_USB_V6_PCS_EQ_CONFIG5 0x1ec
+#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x00
#define QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
#define QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 0130bb8e809a..c69577601ae0 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1112,8 +1112,6 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
@@ -1122,6 +1120,11 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+};
+
static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xc4),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x89),
@@ -1131,9 +1134,6 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
@@ -1142,6 +1142,12 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+};
+
struct qmp_usb_offsets {
u16 serdes;
u16 pcs;
@@ -1383,6 +1389,8 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
.pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sa8775p_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl),
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = qcm2290_usb3phy_reset_l,
@@ -1405,6 +1413,8 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
.pcs_tbl = sc8280xp_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sc8280xp_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_usb_tbl),
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = qcm2290_usb3phy_reset_l,
@@ -1703,6 +1713,7 @@ static int qmp_usb_power_on(struct phy *phy)
void __iomem *tx = qmp->tx;
void __iomem *rx = qmp->rx;
void __iomem *pcs = qmp->pcs;
+ void __iomem *pcs_usb = qmp->pcs_usb;
void __iomem *status;
unsigned int val;
int ret;
@@ -1726,6 +1737,9 @@ static int qmp_usb_power_on(struct phy *phy)
qmp_usb_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (pcs_usb)
+ qmp_usb_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
index 650e20ed69af..75ac7e7c31ae 100644
--- a/drivers/phy/realtek/Kconfig
+++ b/drivers/phy/realtek/Kconfig
@@ -2,6 +2,9 @@
#
# Phy drivers for Realtek platforms
#
+
+if ARCH_REALTEK || COMPILE_TEST
+
config PHY_RTK_RTD_USB2PHY
tristate "Realtek RTD USB2 PHY Transceiver Driver"
depends on USB_SUPPORT
@@ -25,3 +28,5 @@ config PHY_RTK_RTD_USB3PHY
The DHC (digital home center) RTD series SoCs use the Synopsys
DWC3 USB IP. This driver performs the PHY parameter
initialization.
+
+endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
index 5e7ee060b404..aedc78bd37f7 100644
--- a/drivers/phy/realtek/phy-rtk-usb2.c
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -853,17 +853,11 @@ static inline void create_debug_files(struct rtk_phy *rtk_phy)
rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
phy_debug_root);
- if (!rtk_phy->debug_dir)
- return;
- if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb2_parameter_fops))
- goto file_error;
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops);
return;
-
-file_error:
- debugfs_remove_recursive(rtk_phy->debug_dir);
}
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
index 7881f908aade..dfb3122f3f11 100644
--- a/drivers/phy/realtek/phy-rtk-usb3.c
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -416,17 +416,11 @@ static inline void create_debug_files(struct rtk_phy *rtk_phy)
return;
rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
- if (!rtk_phy->debug_dir)
- return;
- if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb3_parameter_fops))
- goto file_error;
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops);
return;
-
-file_error:
- debugfs_remove_recursive(rtk_phy->debug_dir);
}
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e2f7519bef04..e9dc9638120a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1022,20 +1022,17 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
static struct pinctrl *find_pinctrl(struct device *dev)
{
- struct pinctrl *entry, *p = NULL;
+ struct pinctrl *p;
mutex_lock(&pinctrl_list_mutex);
-
- list_for_each_entry(entry, &pinctrl_list, node) {
- if (entry->dev == dev) {
- p = entry;
- kref_get(&p->users);
- break;
+ list_for_each_entry(p, &pinctrl_list, node)
+ if (p->dev == dev) {
+ mutex_unlock(&pinctrl_list_mutex);
+ return p;
}
- }
mutex_unlock(&pinctrl_list_mutex);
- return p;
+ return NULL;
}
static void pinctrl_free(struct pinctrl *p, bool inlist);
@@ -1143,6 +1140,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
p = find_pinctrl(dev);
if (p) {
dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
+ kref_get(&p->users);
return p;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index e5a418026ba3..0b2839d27fd6 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -32,7 +32,8 @@ struct lpi_pinctrl {
char __iomem *tlmm_base;
char __iomem *slew_base;
struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
- struct mutex slew_access_lock;
+ /* Protects from concurrent register updates */
+ struct mutex lock;
DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO);
const struct lpi_pinctrl_variant_data *data;
};
@@ -103,6 +104,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ mutex_lock(&pctrl->lock);
val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
/*
@@ -128,6 +130,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
+ mutex_unlock(&pctrl->lock);
return 0;
}
@@ -233,14 +236,14 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
if (slew_offset == LPI_NO_SLEW)
break;
- mutex_lock(&pctrl->slew_access_lock);
+ mutex_lock(&pctrl->lock);
sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
sval |= arg << slew_offset;
iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
- mutex_unlock(&pctrl->slew_access_lock);
+ mutex_unlock(&pctrl->lock);
break;
default:
return -EINVAL;
@@ -256,6 +259,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
}
+ mutex_lock(&pctrl->lock);
val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
@@ -264,6 +268,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+ mutex_unlock(&pctrl->lock);
return 0;
}
@@ -461,7 +466,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
pctrl->chip.label = dev_name(dev);
pctrl->chip.can_sleep = false;
- mutex_init(&pctrl->slew_access_lock);
+ mutex_init(&pctrl->lock);
pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->ctrl)) {
@@ -483,7 +488,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return 0;
err_pinctrl:
- mutex_destroy(&pctrl->slew_access_lock);
+ mutex_destroy(&pctrl->lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
return ret;
@@ -495,7 +500,7 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
int i;
- mutex_destroy(&pctrl->slew_access_lock);
+ mutex_destroy(&pctrl->lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
for (i = 0; i < pctrl->data->npins; i++)
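The rename above widens the old slew-only lock into a single mutex covering every CFG read-modify-write, since two concurrent writers could otherwise lose updates. A kernel-style sketch of the pattern (struct and register helpers hypothetical):

static void update_cfg(struct my_ctrl *c, u32 mask, u32 bits)
{
        u32 val;

        mutex_lock(&c->lock);            /* one RMW at a time */
        val = reg_read(c, CFG_REG);      /* read ... */
        val = (val & ~mask) | bits;      /* ... modify ... */
        reg_write(c, CFG_REG, val);      /* ... write back */
        mutex_unlock(&c->lock);
}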
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index fd38d8c8371e..ab7d7a1235b8 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -609,24 +609,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (!IS_VRING_DROP(vring)) {
- if (is_rx)
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len,
- sizeof(u64));
+ } else {
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (!IS_VRING_DROP(vring)) {
- if (is_rx)
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ } else {
+ data = 0;
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
}
vring->cur_len = len;
}
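Two things change above: TX now always drains the vring even when the RX side is dropping, and the leftover-bytes TX path zeroes the 64-bit word before the partial copy so stale stack bytes never reach the FIFO. A standalone check of the zero-padding detail:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const unsigned char tail[3] = { 0xaa, 0xbb, 0xcc }; /* leftover bytes */
        uint64_t data;

        data = 0;                        /* the fix: clear the padding first */
        memcpy(&data, tail, sizeof(tail));
        printf("%016llx\n", (unsigned long long)data);
        return 0;
}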
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index f433a13c3689..a5a3941b3f43 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -159,8 +159,7 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
- platform_profile_register(&tpd->handler);
- return 0;
+ return platform_profile_register(&tpd->handler);
}
static void surface_platform_profile_remove(struct ssam_device *sdev)
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index ad702463a65d..6bbffb081053 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -111,6 +111,79 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "21A1"),
}
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=218024 */
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YT"),
+ }
+ },
+ {
+ .ident = "V14 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83GE"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82YU"),
+ }
+ },
+ {
+ .ident = "V15 G4 AMN",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "83CQ"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 14AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VF"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82VG"),
+ }
+ },
+ {
+ .ident = "IdeaPad 1 15AMN7",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82X5"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 14AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XN"),
+ }
+ },
+ {
+ .ident = "IdeaPad Slim 3 15AMN8",
+ .driver_data = &quirk_s2idle_bug,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82XQ"),
+ }
+ },
/* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */
{
.ident = "HP Laptop 15s-eq2xxx",
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index cadbb557a108..1417e230edbd 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -105,6 +105,8 @@ struct apple_gmux_config {
#define GMUX_BRIGHTNESS_MASK 0x00ffffff
#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK
+# define MMIO_GMUX_MAX_BRIGHTNESS 0xffff
+
static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
{
return inb(gmux_data->iostart + port);
@@ -857,7 +859,17 @@ get_version:
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
+
+ /*
+ * All MMIO gmuxes have 0xffff as max brightness, but some iMacs incorrectly
+ * report 0x03ff, despite the firmware being happy to set 0xffff as the brightness
+ * at boot. Force 0xffff for all MMIO gmuxes so they all have the correct brightness
+ * range.
+ */
+ if (type == APPLE_GMUX_TYPE_MMIO)
+ props.max_brightness = MMIO_GMUX_MAX_BRIGHTNESS;
+ else
+ props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
#if IS_REACHABLE(CONFIG_ACPI_VIDEO)
register_bdev = acpi_video_get_backlight_type() == acpi_backlight_apple_gmux;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index d85d895fee89..df1db54d4e18 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -531,6 +531,9 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
+ { KE_IGNORE, 0x2b, }, /* PrintScreen (also sent via PS/2) on newer models */
+ { KE_IGNORE, 0x2c, }, /* CapsLock (also sent via PS/2) on newer models */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 9f8cea5f9615..19bfd30861aa 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -3826,7 +3826,6 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
unsigned int key_value = 1;
bool autorelease = 1;
- int orig_code = code;
if (asus->driver->key_filter) {
asus->driver->key_filter(asus->driver, &code, &key_value,
@@ -3835,16 +3834,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
- if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
- code = ASUS_WMI_BRN_UP;
- else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
- code = ASUS_WMI_BRN_DOWN;
-
- if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
- if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- asus_wmi_backlight_notify(asus, orig_code);
- return;
- }
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor &&
+ code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) {
+ asus_wmi_backlight_notify(asus, code);
+ return;
}
if (code == NOTIFY_KBD_BRTUP) {
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index a478ebfd34df..fc41d1b1bb7f 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,7 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
-#define ASUS_WMI_BRN_DOWN 0x20
+#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
struct module;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 1152deaa0078..33ab207493e3 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -176,7 +176,7 @@ show_uncore_data(initial_max_freq_khz);
static int create_attr_group(struct uncore_data *data, char *name)
{
- int ret, index = 0;
+ int ret, freq, index = 0;
init_attribute_rw(max_freq_khz);
init_attribute_rw(min_freq_khz);
@@ -197,7 +197,11 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+ data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+
data->uncore_attrs[index] = NULL;
data->uncore_attr_group.name = name;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 3d96dbf79a72..a2ffe4157df1 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -6514,6 +6514,7 @@ static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
return 0;
fail_mlxplat_i2c_mux_topology_init:
+ platform_device_unregister(priv->pdev_i2c);
fail_platform_i2c_register:
fail_mlxplat_mlxcpld_verify_bus_topology:
return err;
@@ -6521,6 +6522,7 @@ fail_mlxplat_mlxcpld_verify_bus_topology:
static void mlxplat_i2c_main_exit(struct mlxplat_priv *priv)
{
+ mlxplat_pre_exit(priv);
mlxplat_i2c_mux_topology_exit(priv);
if (priv->pdev_i2c)
platform_device_unregister(priv->pdev_i2c);
@@ -6597,7 +6599,7 @@ static int mlxplat_probe(struct platform_device *pdev)
fail_register_reboot_notifier:
fail_regcache_sync:
- mlxplat_pre_exit(priv);
+ mlxplat_i2c_main_exit(priv);
fail_mlxplat_i2c_main_init:
fail_regmap_write:
fail_alloc:
@@ -6614,7 +6616,6 @@ static int mlxplat_remove(struct platform_device *pdev)
pm_power_off = NULL;
if (mlxplat_reboot_nb)
unregister_reboot_notifier(mlxplat_reboot_nb);
- mlxplat_pre_exit(priv);
mlxplat_i2c_main_exit(priv);
mlxplat_post_exit();
return 0;
diff --git a/drivers/platform/x86/msi-ec.c b/drivers/platform/x86/msi-ec.c
index f26a3121092f..492eb383ee7a 100644
--- a/drivers/platform/x86/msi-ec.c
+++ b/drivers/platform/x86/msi-ec.c
@@ -276,14 +276,13 @@ static struct msi_ec_conf CONF2 __initdata = {
static const char * const ALLOWED_FW_3[] __initconst = {
"1592EMS1.111",
- "E1592IMS.10C",
NULL
};
static struct msi_ec_conf CONF3 __initdata = {
.allowed_fw = ALLOWED_FW_3,
.charge_control = {
- .address = 0xef,
+ .address = 0xd7,
.offset_start = 0x8a,
.offset_end = 0x80,
.range_min = 0x8a,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9569f11dec8c..40878e327afd 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4092,7 +4092,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf,
if (ret > 0) {
struct inode *inode = file_inode(file);
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
}
return ret;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 215597f73be4..d440319a7945 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -412,7 +412,8 @@ dasd_state_ready_to_online(struct dasd_device * device)
KOBJ_CHANGE);
return 0;
}
- disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
+ disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ KOBJ_CHANGE);
}
return 0;
}
@@ -432,7 +433,8 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
- disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
+ disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ KOBJ_CHANGE);
return 0;
}
@@ -3590,7 +3592,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
- max_count = device->block->bdev ? 0 : -1;
+ max_count = device->block->bdev_handle ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
@@ -3636,8 +3638,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
- if (device->block)
- bdev_mark_dead(device->block->bdev, false);
+ if (device->block && device->block->bdev_handle)
+ bdev_mark_dead(device->block->bdev_handle->bdev, false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index fe5108a1b332..55e3abe94cde 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -127,15 +127,15 @@ void dasd_gendisk_free(struct dasd_block *block)
*/
int dasd_scan_partitions(struct dasd_block *block)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int rc;
- bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL,
- NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
- PTR_ERR(bdev));
+ PTR_ERR(bdev_handle));
return -ENODEV;
}
@@ -147,16 +147,15 @@ int dasd_scan_partitions(struct dasd_block *block)
"scan partitions error, rc %d", rc);
/*
- * Since the matching blkdev_put call to the blkdev_get in
- * this function is not called before dasd_destroy_partitions
- * the offline open_count limit needs to be increased from
- * 0 to 1. This is done by setting device->bdev (see
- * dasd_generic_set_offline). As long as the partition
- * detection is running no offline should be allowed. That
- * is why the assignment to device->bdev is done AFTER
- * the BLKRRPART ioctl.
+ * Since the matching bdev_release() call to the
+ * bdev_open_by_dev() in this function is not called before
+ * dasd_destroy_partitions, the offline open_count limit needs to be
+ * increased from 0 to 1. This is done by setting block->bdev_handle
+ * (see dasd_generic_set_offline). As long as the partition detection
+ * is running no offline should be allowed. That is why the assignment
+ * to block->bdev_handle is done AFTER the BLKRRPART ioctl.
*/
- block->bdev = bdev;
+ block->bdev_handle = bdev_handle;
return 0;
}
@@ -166,21 +165,21 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
/*
- * Get the bdev pointer from the device structure and clear
- * device->bdev to lower the offline open_count limit again.
+ * Get the bdev_handle pointer from the block structure and clear
+ * block->bdev_handle to lower the offline open_count limit again.
*/
- bdev = block->bdev;
- block->bdev = NULL;
+ bdev_handle = block->bdev_handle;
+ block->bdev_handle = NULL;
- mutex_lock(&bdev->bd_disk->open_mutex);
- bdev_disk_changed(bdev->bd_disk, true);
- mutex_unlock(&bdev->bd_disk->open_mutex);
+ mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex);
+ bdev_disk_changed(bdev_handle->bdev->bd_disk, true);
+ mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex);
/* Matching bdev_release() to the bdev_open_by_dev() in dasd_scan_partitions. */
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
}
int dasd_gendisk_init(void)
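The conversion above pairs bdev_open_by_dev() with bdev_release(), and the stored handle doubles as the "partition detection in progress" marker described in the comment. A kernel-style sketch of the pairing (container struct hypothetical, API as used in this series):

struct my_block {
        struct bdev_handle *bdev_handle;
};

static int attach(struct my_block *b, dev_t devt)
{
        struct bdev_handle *h;

        h = bdev_open_by_dev(devt, BLK_OPEN_READ, NULL, NULL);
        if (IS_ERR(h))
                return PTR_ERR(h);
        b->bdev_handle = h;              /* also raises the offline limit */
        return 0;
}

static void detach(struct my_block *b)
{
        struct bdev_handle *h = b->bdev_handle;

        b->bdev_handle = NULL;           /* lower the limit again */
        bdev_release(h);                 /* matches bdev_open_by_dev() */
}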
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 8a4dbe9d7741..2e663131adaf 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -650,7 +650,7 @@ struct dasd_block {
struct gendisk *gdp;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
atomic_t open_count;
unsigned long blocks; /* size of volume in blocks */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index d55862605b82..61b9675e2a67 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -537,7 +537,7 @@ static int __dasd_ioctl_information(struct dasd_block *block,
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
- if (!block->bdev)
+ if (!block->bdev_handle)
dasd_info->open_count++;
/*
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3ef636935a54..3ff46fc694f8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -233,17 +233,19 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
*/
ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
if (ret)
- goto err;
+ goto err_lock;
/*
* But we don't have such restrictions imposed on the stuff that
* is handled by the streaming API.
*/
ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
if (ret)
- goto err;
+ goto err_lock;
return sch;
+err_lock:
+ kfree(sch->lock);
err:
kfree(sch);
return ERR_PTR(ret);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c3c1f466fe01..605013d3ee83 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12913,8 +12913,10 @@ _mpt3sas_init(void)
mpt3sas_ctl_init(hbas_to_enumerate);
error = pci_register_driver(&mpt3sas_driver);
- if (error)
+ if (error) {
+ mpt3sas_ctl_exit(hbas_to_enumerate);
scsih_exit();
+ }
return error;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 50db08265c51..dcae09a37d49 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4953,7 +4953,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (!list_empty(&ha->base_qpair->dsd_list)) {
+ if (ha->base_qpair && !list_empty(&ha->base_qpair->dsd_list)) {
struct dsd_dma *dsd_ptr, *tdsd_ptr;
/* clean up allocated prev pool */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 83b6a3f3863b..6effa13039f3 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -209,7 +209,8 @@ manage_start_stop_show(struct device *dev,
return sysfs_emit(buf, "%u\n",
sdp->manage_system_start_stop &&
- sdp->manage_runtime_start_stop);
+ sdp->manage_runtime_start_stop &&
+ sdp->manage_shutdown);
}
static DEVICE_ATTR_RO(manage_start_stop);
@@ -275,6 +276,35 @@ manage_runtime_start_stop_store(struct device *dev,
}
static DEVICE_ATTR_RW(manage_runtime_start_stop);
+static ssize_t manage_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_shutdown);
+}
+
+static ssize_t manage_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_shutdown = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_shutdown);
+
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -607,6 +637,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_manage_start_stop.attr,
&dev_attr_manage_system_start_stop.attr,
&dev_attr_manage_runtime_start_stop.attr,
+ &dev_attr_manage_shutdown.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -3819,8 +3850,10 @@ static void sd_shutdown(struct device *dev)
sd_sync_cache(sdkp, NULL);
}
- if (system_state != SYSTEM_RESTART &&
- sdkp->device->manage_system_start_stop) {
+ if ((system_state != SYSTEM_RESTART &&
+ sdkp->device->manage_system_start_stop) ||
+ (system_state == SYSTEM_POWER_OFF &&
+ sdkp->device->manage_shutdown)) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
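The new manage_shutdown attribute follows the stock sysfs bool pattern: privilege check, strict parse, single assignment, consume the whole write. A kernel-style sketch of that store shape (backing flag hypothetical):

static bool some_flag;                   /* hypothetical backing flag */

static ssize_t knob_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        bool v;

        if (!capable(CAP_SYS_ADMIN))     /* writable by privileged users only */
                return -EACCES;
        if (kstrtobool(buf, &v))         /* accepts 0/1, y/n, on/off */
                return -EINVAL;
        some_flag = v;
        return count;                    /* consume the whole write */
}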
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 12040ce116a5..acc812e490d0 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -334,12 +334,14 @@ if RISCV
config ARCH_R9A07G043
bool "RISC-V Platform support for RZ/Five"
depends on NONPORTABLE
+ depends on RISCV_ALTERNATIVE
+ depends on !RISCV_ISA_ZICBOM
+ depends on RISCV_SBI
select ARCH_RZG2L
- select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT
+ select AX45MP_L2_CACHE
select DMA_GLOBAL_POOL
- select ERRATA_ANDES if RISCV_SBI
- select ERRATA_ANDES_CMO if ERRATA_ANDES
-
+ select ERRATA_ANDES
+ select ERRATA_ANDES_CMO
help
This enables support for the Renesas RZ/Five SoC.
diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c
index 0ca21ff0e9cc..e42248519688 100644
--- a/drivers/spi/spi-npcm-fiu.c
+++ b/drivers/spi/spi-npcm-fiu.c
@@ -353,8 +353,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth)
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
- uma_cfg |= ilog2(op->dummy.buswidth)
- << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+ if (op->dummy.nbytes)
+ uma_cfg |= ilog2(op->dummy.buswidth)
+ << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth)
<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a6a06a5f7483..8eb9eb7ce5df 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -91,7 +91,8 @@ static int iblock_configure_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
struct request_queue *q;
- struct block_device *bd = NULL;
+ struct bdev_handle *bdev_handle;
+ struct block_device *bd;
struct blk_integrity *bi;
blk_mode_t mode = BLK_OPEN_READ;
unsigned int max_write_zeroes_sectors;
@@ -116,12 +117,14 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_flags |= DF_READ_ONLY;
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev, NULL);
- if (IS_ERR(bd)) {
- ret = PTR_ERR(bd);
+ bdev_handle = bdev_open_by_path(ib_dev->ibd_udev_path, mode, ib_dev,
+ NULL);
+ if (IS_ERR(bdev_handle)) {
+ ret = PTR_ERR(bdev_handle);
goto out_free_bioset;
}
- ib_dev->ibd_bd = bd;
+ ib_dev->ibd_bdev_handle = bdev_handle;
+ ib_dev->ibd_bd = bd = bdev_handle->bdev;
q = bdev_get_queue(bd);
@@ -177,7 +180,7 @@ static int iblock_configure_device(struct se_device *dev)
return 0;
out_blkdev_put:
- blkdev_put(ib_dev->ibd_bd, ib_dev);
+ bdev_release(ib_dev->ibd_bdev_handle);
out_free_bioset:
bioset_exit(&ib_dev->ibd_bio_set);
out:
@@ -202,8 +205,8 @@ static void iblock_destroy_device(struct se_device *dev)
{
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- if (ib_dev->ibd_bd != NULL)
- blkdev_put(ib_dev->ibd_bd, ib_dev);
+ if (ib_dev->ibd_bdev_handle)
+ bdev_release(ib_dev->ibd_bdev_handle);
bioset_exit(&ib_dev->ibd_bio_set);
}
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 8c55375d2f75..683f9a55945b 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -32,6 +32,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set ibd_bio_set;
struct block_device *ibd_bd;
+ struct bdev_handle *ibd_bdev_handle;
bool ibd_readonly;
struct iblock_dev_plug *ibd_plug;
} ____cacheline_aligned;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 0d4f09693ef4..41b7489d37ce 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -352,7 +352,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
struct Scsi_Host *sh = sd->host;
- struct block_device *bd;
+ struct bdev_handle *bdev_handle;
int ret;
if (scsi_device_get(sd)) {
@@ -366,18 +366,18 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK and TYPE_ZBC using supplied udev_path
*/
- bd = blkdev_get_by_path(dev->udev_path, BLK_OPEN_WRITE | BLK_OPEN_READ,
- pdv, NULL);
- if (IS_ERR(bd)) {
- pr_err("pSCSI: blkdev_get_by_path() failed\n");
+ bdev_handle = bdev_open_by_path(dev->udev_path,
+ BLK_OPEN_WRITE | BLK_OPEN_READ, pdv, NULL);
+ if (IS_ERR(bdev_handle)) {
+ pr_err("pSCSI: bdev_open_by_path() failed\n");
scsi_device_put(sd);
- return PTR_ERR(bd);
+ return PTR_ERR(bdev_handle);
}
- pdv->pdv_bd = bd;
+ pdv->pdv_bdev_handle = bdev_handle;
ret = pscsi_add_device_to_list(dev, sd);
if (ret) {
- blkdev_put(pdv->pdv_bd, pdv);
+ bdev_release(bdev_handle);
scsi_device_put(sd);
return ret;
}
@@ -564,9 +564,9 @@ static void pscsi_destroy_device(struct se_device *dev)
* from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) &&
- pdv->pdv_bd) {
- blkdev_put(pdv->pdv_bd, pdv);
- pdv->pdv_bd = NULL;
+ pdv->pdv_bdev_handle) {
+ bdev_release(pdv->pdv_bdev_handle);
+ pdv->pdv_bdev_handle = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
@@ -994,8 +994,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
- if (pdv->pdv_bd)
- return bdev_nr_sectors(pdv->pdv_bd);
+ if (pdv->pdv_bdev_handle)
+ return bdev_nr_sectors(pdv->pdv_bdev_handle->bdev);
return 0;
}
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 23d9a6e340d4..b0a3ef136592 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -37,7 +37,7 @@ struct pscsi_dev_virt {
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
- struct block_device *pdv_bd;
+ struct bdev_handle *pdv_bdev_handle;
struct scsi_device *pdv_sd;
struct Scsi_Host *pdv_lld_host;
} ____cacheline_aligned;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index dd0a1ef8cf12..27bd6ca6f99e 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1907,14 +1907,14 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
in = &sw->ports[ev->port];
if (!tb_port_is_dpin(in)) {
tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
- goto unlock;
+ goto put_sw;
}
tb_port_dbg(in, "handling bandwidth allocation request\n");
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
- goto unlock;
+ goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
@@ -1923,7 +1923,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "no bandwidth request active\n");
else
tb_port_warn(in, "failed to read requested bandwidth\n");
- goto unlock;
+ goto put_sw;
}
requested_bw = ret;
@@ -1932,7 +1932,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
tb_port_warn(in, "failed to find tunnel\n");
- goto unlock;
+ goto put_sw;
}
out = tunnel->dst_port;
@@ -1959,6 +1959,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_recalc_estimated_bandwidth(tb);
}
+put_sw:
+ tb_switch_put(sw);
unlock:
mutex_unlock(&tb->lock);
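The new put_sw label sits above unlock so that every early exit drops the switch reference taken earlier in the handler before the common mutex release. A hedged sketch of the label ordering; port_ok() and do_work() are invented stand-ins for the driver's checks:

    /* Illustrative unwind ordering only, not the driver's actual handler. */
    static void handle_event(struct tb *tb, u64 route)
    {
    	struct tb_switch *sw;

    	mutex_lock(&tb->lock);

    	sw = tb_switch_find_by_route(tb, route);	/* takes a reference */
    	if (!sw)
    		goto unlock;				/* nothing to put yet */

    	if (!port_ok(sw))				/* hypothetical check */
    		goto put_sw;

    	do_work(sw);					/* hypothetical work */

    put_sw:
    	tb_switch_put(sw);		/* drop the reference before... */
    unlock:
    	mutex_unlock(&tb->lock);	/* ...releasing the shared lock */
    }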
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 8a94e5a43c6d..d13d2f2e76c7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -818,7 +818,7 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
spin_lock(&tty->files_lock);
list_for_each_entry(priv, &tty->tty_files, list) {
struct inode *inode = file_inode(priv->file);
- struct timespec64 *time = mtime ? &inode->i_mtime : &inode->i_atime;
+ struct timespec64 time = mtime ? inode_get_mtime(inode) : inode_get_atime(inode);
/*
* We only care if the two values differ in anything other than the
@@ -826,8 +826,12 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
* lower three bits (i.e every 8 seconds). If so, then we can update
* the time of the tty device, otherwise it could be construed as a
* security leak to let userspace know the exact timing of the tty.
*/
- if ((sec ^ time->tv_sec) & ~7)
- time->tv_sec = sec;
+ if ((sec ^ time.tv_sec) & ~7) {
+ if (mtime)
+ inode_set_mtime(inode, sec, 0);
+ else
+ inode_set_atime(inode, sec, 0);
+ }
}
spin_unlock(&tty->files_lock);
}
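The masking is easier to see in isolation: XOR-ing the old and new seconds and clearing the low three bits makes the comparison blind to deltas under eight seconds, so tty timestamps advance in 8 s buckets at most. A standalone demonstration of the check:

    #include <stdio.h>
    #include <stdint.h>

    /* Returns 1 when the stored timestamp should be refreshed, i.e. when
     * old and new differ in something above the low 3 bits (8 s buckets). */
    static int needs_update(int64_t old_sec, int64_t new_sec)
    {
    	return ((old_sec ^ new_sec) & ~7) != 0;
    }

    int main(void)
    {
    	printf("%d\n", needs_update(1000, 1004));	/* 0: same 8 s bucket */
    	printf("%d\n", needs_update(1000, 1009));	/* 1: bucket changed */
    	return 0;
    }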
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4f68f6ef3cc1..3beb6a862e80 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -2642,21 +2642,24 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL\n", __func__);
ret = proc_control(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_BULK:
snoop(&dev->dev, "%s: BULK\n", __func__);
ret = proc_bulk(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_RESETEP:
snoop(&dev->dev, "%s: RESETEP\n", __func__);
ret = proc_resetep(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_RESET:
@@ -2668,7 +2671,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
ret = proc_clearhalt(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_GETDRIVER:
@@ -2695,7 +2699,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
ret = proc_submiturb(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
#ifdef CONFIG_COMPAT
@@ -2703,14 +2708,16 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: CONTROL32\n", __func__);
ret = proc_control_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_BULK32:
snoop(&dev->dev, "%s: BULK32\n", __func__);
ret = proc_bulk_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_DISCSIGNAL32:
@@ -2722,7 +2729,8 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
ret = proc_submiturb_compat(ps, p);
if (ret >= 0)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
break;
case USBDEVFS_IOCTL32:
@@ -2804,7 +2812,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
done:
usb_unlock_device(dev);
if (ret >= 0)
- inode->i_atime = current_time(inode);
+ inode_set_atime_to_ts(inode, current_time(inode));
return ret;
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6e9ef35a43a7..ec26df0306f2 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1383,8 +1383,8 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
inode->i_mode = perms->mode;
inode->i_uid = perms->uid;
inode->i_gid = perms->gid;
- inode->i_atime = ts;
- inode->i_mtime = ts;
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
inode->i_private = data;
if (fops)
inode->i_fop = fops;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index ce9e31f3d26b..cdc0926100fd 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1969,7 +1969,7 @@ gadgetfs_make_inode (struct super_block *sb,
inode->i_mode = mode;
inode->i_uid = make_kuid(&init_user_ns, default_uid);
inode->i_gid = make_kgid(&init_user_ns, default_gid);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_private = data;
inode->i_fop = fops;
}
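simple_inode_init_ts() collapses the old open-coded triple assignment. A hedged sketch of what the helper is assumed to do, expressed with the accessors this series introduces (the in-tree body may differ in detail):

    /* Sketch: the triple assignment the helper is assumed to replace. */
    static inline struct timespec64 sketch_inode_init_ts(struct inode *inode)
    {
    	struct timespec64 now = inode_set_ctime_current(inode);

    	inode_set_atime_to_ts(inode, now);
    	inode_set_mtime_to_ts(inode, now);
    	return now;
    }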
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7994a4549a6c..45dcfaadaf98 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
+#define DELL_PRODUCT_FM101R 0x8213
+#define DELL_PRODUCT_FM101R_ESIM 0x8215
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -1108,6 +1111,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
.driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1290,6 +1295,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
@@ -2262,6 +2268,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ } /* Terminating entry */
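New modem IDs follow the table's established idioms: USB_DEVICE_INTERFACE_CLASS() binds only interfaces of the given class (0xff, vendor specific), while driver_info flags such as RSVD(n) keep individual interface numbers away from the driver. A hypothetical entry for illustration, with invented IDs:

    /* Hypothetical device 0x1234:0x5678: bind vendor-specific interfaces
     * only, and keep interface 2 away from the driver (e.g. a QMI port). */
    { USB_DEVICE_INTERFACE_CLASS(0x1234, 0x5678, 0xff),
      .driver_info = RSVD(2) },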
diff --git a/drivers/vdpa/mlx5/net/debug.c b/drivers/vdpa/mlx5/net/debug.c
index 60d6ac68cdc4..9c85162c19fc 100644
--- a/drivers/vdpa/mlx5/net/debug.c
+++ b/drivers/vdpa/mlx5/net/debug.c
@@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
}
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev)
{
- debugfs_remove_recursive(dbg);
+ debugfs_remove_recursive(ndev->debugfs);
+ ndev->debugfs = NULL;
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 40a03b08d7cf..946488b8989f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
+static int read_umem_params(struct mlx5_vdpa_net *ndev)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int out_size;
+ void *caps;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev,
+ "Failed reading vdpa umem capabilities with err %d\n", err);
+ goto out;
+ }
+
+ caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
+ ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
+
+ ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
+ ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
+
+ ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
+ ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
+
+out:
+ kfree(out);
+ return err;
+}
+
static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
struct mlx5_vdpa_umem **umemp)
{
- struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
- int p_a;
- int p_b;
+ u32 p_a;
+ u32 p_b;
switch (num) {
case 1:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+ p_a = ndev->umem_1_buffer_param_a;
+ p_b = ndev->umem_1_buffer_param_b;
*umemp = &mvq->umem1;
break;
case 2:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+ p_a = ndev->umem_2_buffer_param_a;
+ p_b = ndev->umem_2_buffer_param_b;
*umemp = &mvq->umem2;
break;
case 3:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+ p_a = ndev->umem_3_buffer_param_a;
+ p_b = ndev->umem_3_buffer_param_b;
*umemp = &mvq->umem3;
break;
}
+
(*umemp)->size = p_a * mvq->num_ent + p_b;
}
@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
goto out;
}
mlx5_vdpa_add_debugfs(ndev);
+
+ err = read_umem_params(ndev);
+ if (err)
+ goto err_setup;
+
err = setup_virtqueues(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
@@ -2713,7 +2758,7 @@ err_tir:
err_rqt:
teardown_virtqueues(ndev);
err_setup:
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
+ mlx5_vdpa_remove_debugfs(ndev);
out:
return err;
}
@@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
if (!ndev->setup)
return;
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
- ndev->debugfs = NULL;
+ mlx5_vdpa_remove_debugfs(ndev);
teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
@@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
- ndev->debugfs = NULL;
unregister_link_notifier(ndev);
_vdpa_unregister_device(dev);
wq = mvdev->wq;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
index 36c44d9fdd16..90b556a57971 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
struct mlx5_vdpa_irq_pool irqp;
struct dentry *debugfs;
+
+ u32 umem_1_buffer_param_a;
+ u32 umem_1_buffer_param_b;
+
+ u32 umem_2_buffer_param_a;
+ u32 umem_2_buffer_param_b;
+
+ u32 umem_3_buffer_param_a;
+ u32 umem_3_buffer_param_b;
};
struct mlx5_vdpa_counter {
@@ -88,7 +97,7 @@ struct macvlan_node {
};
void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
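The cached parameters feed the linear sizing at the end of set_umem_size(): size = p_a * num_ent + p_b. A standalone check of that arithmetic with made-up firmware values:

    #include <stdint.h>
    #include <stdio.h>

    /* umem size = param_a * queue_entries + param_b (values illustrative). */
    static uint64_t umem_size(uint32_t p_a, uint32_t p_b, uint16_t num_ent)
    {
    	return (uint64_t)p_a * num_ent + p_b;
    }

    int main(void)
    {
    	/* e.g. p_a = 128, p_b = 4096, 1024-entry virtqueue */
    	printf("%llu\n", (unsigned long long)umem_size(128, 4096, 1024));
    	return 0;
    }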
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 00d7d72713be..b3a3cb165795 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void)
GFP_KERNEL);
if (!shared_buffer) {
ret = -ENOMEM;
- goto parent_err;
+ goto mgmt_dev_err;
}
}
return 0;
-
+mgmt_dev_err:
+ vdpa_mgmtdev_unregister(&mgmt_dev);
parent_err:
device_unregister(&vdpasim_blk_mgmtdev);
return ret;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c71d573f1c94..e0c181ad17e3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
goto done;
}
- if ((msg.type == VHOST_IOTLB_UPDATE ||
- msg.type == VHOST_IOTLB_INVALIDATE) &&
- msg.size == 0) {
+ if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
ret = -EINVAL;
goto done;
}
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 5c87817a4f4c..3dcf83f5e7b4 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3440,11 +3440,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
}
info->fix.mmio_start = raddr;
+#if defined(__i386__) || defined(__ia64__)
/*
* By using strong UC we force the MTRR to never have an
* effect on the MMIO region on both non-PAT and PAT systems.
*/
par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
+#else
+ par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+#endif
if (par->ati_regbase == NULL)
return -ENOMEM;
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 6d4bfeecee35..5b80bf3dae50 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -382,7 +382,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
- unsigned long const bits_per_line = p->fix.line_length*8u;
+ unsigned int const bits_per_line = p->fix.line_length * 8u;
unsigned long __iomem *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index c1eda3190968..7b8bd3a2bedc 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -316,7 +316,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
- unsigned long const bits_per_line = p->fix.line_length*8u;
+ unsigned int const bits_per_line = p->fix.line_length * 8u;
unsigned long *base = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 167585a889d3..719b99a9bc77 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
/* paths */
int path_num;
- struct mmphw_path_plat path_plats[];
+ struct mmphw_path_plat path_plats[] __counted_by(path_num);
};
static inline int overlay_is_vid(struct mmp_overlay *overlay)
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index f28cb90947a3..42c96f1cfc93 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -1645,13 +1645,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
}
fbdev->int_irq = platform_get_irq(pdev, 0);
if (fbdev->int_irq < 0) {
- r = ENXIO;
+ r = -ENXIO;
goto cleanup;
}
fbdev->ext_irq = platform_get_irq(pdev, 1);
if (fbdev->ext_irq < 0) {
- r = ENXIO;
+ r = -ENXIO;
goto cleanup;
}
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 3d76ce111488..cf0f706762b4 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1214,7 +1214,7 @@ static struct platform_driver sa1100fb_driver = {
},
};
-int __init sa1100fb_init(void)
+static int __init sa1100fb_init(void)
{
if (fb_get_options("sa1100fb", NULL))
return -ENODEV;
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index a1a67830fbbc..e1f421e91b4f 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1928,10 +1928,10 @@ static void uvesafb_exit(void)
}
}
- cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
+ cn_del_callback(&uvesafb_cn_id);
}
module_exit(uvesafb_exit);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 5b15936a5214..2d5d252ef419 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
&num_pages);
- target = num_pages;
+ /*
+ * Align the target up to the guest page size to avoid inflating
+ * and deflating the balloon endlessly.
+ */
+ target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
return target - vb->num_pages;
}
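With 64 KiB guest pages, VIRTIO_BALLOON_PAGES_PER_PAGE is 16 (the config field counts 4 KiB balloon pages), so a host target that is not a multiple of 16 can never be hit exactly and the balloon keeps oscillating around it. Rounding the target up removes the oscillation; a standalone version of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
    	uint32_t pages_per_page = 16;	/* 64 KiB / 4 KiB, e.g. arm64 */
    	uint32_t num_pages = 1000;	/* host request, in 4 KiB units */

    	/* 1000 is unreachable in 16-page steps; aim for 1008 instead. */
    	printf("%u\n", ALIGN_UP(num_pages, pages_per_page));
    	return 0;
    }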
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 97760f611295..59892a31cf76 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(vm_dev->base))
- return PTR_ERR(vm_dev->base);
+ if (IS_ERR(vm_dev->base)) {
+ rc = PTR_ERR(vm_dev->base);
+ goto free_vm_dev;
+ }
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
/* Check device version */
@@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- return -ENXIO;
+ rc = -ENXIO;
+ goto free_vm_dev;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
put_device(&vm_dev->vdev.dev);
return rc;
+
+free_vm_dev:
+ kfree(vm_dev);
+ return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index aad7d9296e77..9cb601e16688 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
err = -EINVAL;
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
+ 0, sizeof(struct virtio_pci_modern_common_cfg),
NULL, NULL);
if (!mdev->common)
goto err_map_common;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 0d28ecf668d0..b845ee18a80b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -260,7 +260,7 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_blocks = 0;
inode->i_rdev = rdev;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->a_ops = &v9fs_addr_operations;
inode->i_private = NULL;
@@ -1150,8 +1150,8 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
set_nlink(inode, 1);
- inode->i_atime.tv_sec = stat->atime;
- inode->i_mtime.tv_sec = stat->mtime;
+ inode_set_atime(inode, stat->atime, 0);
+ inode_set_mtime(inode, stat->mtime, 0);
inode_set_ctime(inode, stat->mtime, 0);
inode->i_uid = v9ses->dfltuid;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 1312f68965ac..c7319af2f471 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -641,10 +641,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
struct v9fs_inode *v9inode = V9FS_I(inode);
if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode_set_atime(inode, stat->st_atime_sec,
+ stat->st_atime_nsec);
+ inode_set_mtime(inode, stat->st_mtime_sec,
+ stat->st_mtime_nsec);
inode_set_ctime(inode, stat->st_ctime_sec,
stat->st_ctime_nsec);
inode->i_uid = stat->st_uid;
@@ -660,12 +660,12 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
- inode->i_atime.tv_sec = stat->st_atime_sec;
- inode->i_atime.tv_nsec = stat->st_atime_nsec;
+ inode_set_atime(inode, stat->st_atime_sec,
+ stat->st_atime_nsec);
}
if (stat->st_result_mask & P9_STATS_MTIME) {
- inode->i_mtime.tv_sec = stat->st_mtime_sec;
- inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+ inode_set_mtime(inode, stat->st_mtime_sec,
+ stat->st_mtime_nsec);
}
if (stat->st_result_mask & P9_STATS_CTIME) {
inode_set_ctime(inode, stat->st_ctime_sec,
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index e00cf8109b3f..053d1cef6e13 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -162,27 +162,27 @@ static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
return v9fs_xattr_set(dentry, full_name, value, size, flags);
}
-static struct xattr_handler v9fs_xattr_user_handler = {
+static const struct xattr_handler v9fs_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
-static struct xattr_handler v9fs_xattr_trusted_handler = {
+static const struct xattr_handler v9fs_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
#ifdef CONFIG_9P_FS_SECURITY
-static struct xattr_handler v9fs_xattr_security_handler = {
+static const struct xattr_handler v9fs_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = v9fs_xattr_handler_get,
.set = v9fs_xattr_handler_set,
};
#endif
-const struct xattr_handler *v9fs_xattr_handlers[] = {
+const struct xattr_handler * const v9fs_xattr_handlers[] = {
&v9fs_xattr_user_handler,
&v9fs_xattr_trusted_handler,
#ifdef CONFIG_9P_FS_SECURITY
diff --git a/fs/9p/xattr.h b/fs/9p/xattr.h
index b5636e544c8a..3ad5a802352a 100644
--- a/fs/9p/xattr.h
+++ b/fs/9p/xattr.h
@@ -10,7 +10,7 @@
#include <net/9p/9p.h>
#include <net/9p/client.h>
-extern const struct xattr_handler *v9fs_xattr_handlers[];
+extern const struct xattr_handler * const v9fs_xattr_handlers[];
ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
void *buffer, size_t buffer_size);
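Making both the handlers and the pointer array const moves the whole xattr table into read-only data. A minimal sketch of the pattern the 9p and afs hunks converge on, with the callbacks elided:

    #include <linux/xattr.h>

    static const struct xattr_handler example_user_handler = {
    	.prefix	= XATTR_USER_PREFIX,
    	/* .get/.set callbacks elided in this sketch */
    };

    /* The array of pointers is itself const: neither the slots nor the
     * handlers they point at sit in writable data. */
    const struct xattr_handler * const example_xattr_handlers[] = {
    	&example_user_handler,
    	NULL,
    };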
diff --git a/fs/Kconfig b/fs/Kconfig
index aa7e03cc1941..0d6cb927872a 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -48,6 +48,7 @@ source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"
+source "fs/bcachefs/Kconfig"
source "fs/zonefs/Kconfig"
endif # BLOCK
diff --git a/fs/Makefile b/fs/Makefile
index f9541f40be4e..75522f88e763 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_F2FS_FS) += f2fs/
+obj-$(CONFIG_BCACHEFS_FS) += bcachefs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 20963002578a..3081edb09e46 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -242,6 +242,7 @@ struct inode *
adfs_iget(struct super_block *sb, struct object_info *obj)
{
struct inode *inode;
+ struct timespec64 ts;
inode = new_inode(sb);
if (!inode)
@@ -268,9 +269,10 @@ adfs_iget(struct super_block *sb, struct object_info *obj)
ADFS_I(inode)->attr = obj->attr;
inode->i_mode = adfs_atts2mode(sb, inode);
- adfs_adfs2unix_time(&inode->i_mtime, inode);
- inode->i_atime = inode->i_mtime;
- inode_set_ctime_to_ts(inode, inode->i_mtime);
+ adfs_adfs2unix_time(&ts, inode);
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
+ inode_set_ctime_to_ts(inode, ts);
if (S_ISDIR(inode->i_mode)) {
inode->i_op = &adfs_dir_inode_operations;
@@ -321,7 +323,8 @@ adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
if (ia_valid & ATTR_MTIME && adfs_inode_is_stamped(inode)) {
adfs_unix2adfs_time(inode, &attr->ia_mtime);
- adfs_adfs2unix_time(&inode->i_mtime, inode);
+ adfs_adfs2unix_time(&attr->ia_mtime, inode);
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
}
/*
@@ -329,7 +332,7 @@ adfs_notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
* have the ability to represent them in our filesystem?
*/
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 7ba93efc1143..fd669daa4e7b 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -60,7 +60,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh)
mark_buffer_dirty_inode(dir_bh, dir);
affs_brelse(dir_bh);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
mark_inode_dirty(dir);
@@ -114,7 +114,7 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
affs_brelse(bh);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
mark_inode_dirty(dir);
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 060746c63151..0210df8d3500 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -149,13 +149,9 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino)
break;
}
- inode->i_mtime.tv_sec = inode->i_atime.tv_sec =
- inode_set_ctime(inode,
- (be32_to_cpu(tail->change.days) * 86400LL +
- be32_to_cpu(tail->change.mins) * 60 +
- be32_to_cpu(tail->change.ticks) / 50 + AFFS_EPOCH_DELTA)
- + sys_tz.tz_minuteswest * 60, 0).tv_sec;
- inode->i_mtime.tv_nsec = inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode,
+		 inode_set_atime(inode,
+				 inode_set_ctime(inode,
+						 (be32_to_cpu(tail->change.days) * 86400LL +
+						  be32_to_cpu(tail->change.mins) * 60 +
+						  be32_to_cpu(tail->change.ticks) / 50 +
+						  AFFS_EPOCH_DELTA) +
+						 sys_tz.tz_minuteswest * 60,
+						 0).tv_sec,
+				 0).tv_sec,
+		 0);
affs_brelse(bh);
unlock_new_inode(inode);
return inode;
@@ -187,12 +183,13 @@ affs_write_inode(struct inode *inode, struct writeback_control *wbc)
}
tail = AFFS_TAIL(sb, bh);
if (tail->stype == cpu_to_be32(ST_ROOT)) {
- affs_secs_to_datestamp(inode->i_mtime.tv_sec,
+ affs_secs_to_datestamp(inode_get_mtime_sec(inode),
&AFFS_ROOT_TAIL(sb, bh)->root_change);
} else {
tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect);
tail->size = cpu_to_be32(inode->i_size);
- affs_secs_to_datestamp(inode->i_mtime.tv_sec, &tail->change);
+ affs_secs_to_datestamp(inode_get_mtime_sec(inode),
+ &tail->change);
if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) {
uid = i_uid_read(inode);
gid = i_gid_read(inode);
@@ -314,7 +311,7 @@ affs_new_inode(struct inode *dir)
inode->i_gid = current_fsgid();
inode->i_ino = block;
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
atomic_set(&AFFS_I(inode)->i_opencnt, 0);
AFFS_I(inode)->i_blkcnt = 0;
AFFS_I(inode)->i_lc = NULL;
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 95bcbd7654d1..4d04ef2d3ae7 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -88,7 +88,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
set_nlink(inode, 2);
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
inode->i_generation = 0;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 1c794a1896aa..78efc9719349 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -91,8 +91,8 @@ static int afs_inode_init_from_status(struct afs_operation *op,
t = status->mtime_client;
inode_set_ctime_to_ts(inode, t);
- inode->i_mtime = t;
- inode->i_atime = t;
+ inode_set_mtime_to_ts(inode, t);
+ inode_set_atime_to_ts(inode, t);
inode->i_flags |= S_NOATIME;
inode->i_uid = make_kuid(&init_user_ns, status->owner);
inode->i_gid = make_kgid(&init_user_ns, status->group);
@@ -204,7 +204,7 @@ static void afs_apply_status(struct afs_operation *op,
}
t = status->mtime_client;
- inode->i_mtime = t;
+ inode_set_mtime_to_ts(inode, t);
if (vp->update_ctime)
inode_set_ctime_to_ts(inode, op->ctime);
@@ -253,7 +253,7 @@ static void afs_apply_status(struct afs_operation *op,
if (change_size) {
afs_set_i_size(vnode, status->size);
inode_set_ctime_to_ts(inode, t);
- inode->i_atime = t;
+ inode_set_atime_to_ts(inode, t);
}
}
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index da73b97e19a9..23e2cc4efe41 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -1541,7 +1541,7 @@ int afs_launder_folio(struct folio *);
/*
* xattr.c
*/
-extern const struct xattr_handler *afs_xattr_handlers[];
+extern const struct xattr_handler * const afs_xattr_handlers[];
/*
* yfsclient.c
diff --git a/fs/afs/write.c b/fs/afs/write.c
index e1c45341719b..4a168781936b 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -424,7 +424,7 @@ try_next_key:
op->store.write_iter = iter;
op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
- op->mtime = vnode->netfs.inode.i_mtime;
+ op->mtime = inode_get_mtime(&vnode->netfs.inode);
afs_wait_for_operation(op);
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 9048d8ccc715..64b2c0224f62 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -353,7 +353,7 @@ static const struct xattr_handler afs_xattr_afs_volume_handler = {
.get = afs_xattr_get_volume,
};
-const struct xattr_handler *afs_xattr_handlers[] = {
+const struct xattr_handler * const afs_xattr_handlers[] = {
&afs_xattr_afs_acl_handler,
&afs_xattr_afs_cell_handler,
&afs_xattr_afs_fid_handler,
diff --git a/fs/attr.c b/fs/attr.c
index a8ae5f6d9b16..bdf5deb06ea9 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -308,9 +308,9 @@ void setattr_copy(struct mnt_idmap *idmap, struct inode *inode,
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index d5a44fa88acf..8c1d587b3eef 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -25,6 +25,8 @@
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/magic.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -205,20 +207,34 @@ static inline void managed_dentry_clear_managed(struct dentry *dentry)
/* Initializing function */
-int autofs_fill_super(struct super_block *, void *, int);
+extern const struct fs_parameter_spec autofs_param_specs[];
+int autofs_init_fs_context(struct fs_context *fc);
struct autofs_info *autofs_new_ino(struct autofs_sb_info *);
void autofs_clean_ino(struct autofs_info *);
-static inline int autofs_prepare_pipe(struct file *pipe)
+static inline int autofs_check_pipe(struct file *pipe)
{
if (!(pipe->f_mode & FMODE_CAN_WRITE))
return -EINVAL;
if (!S_ISFIFO(file_inode(pipe)->i_mode))
return -EINVAL;
+ return 0;
+}
+
+static inline void autofs_set_packet_pipe_flags(struct file *pipe)
+{
/* We want a packet pipe */
pipe->f_flags |= O_DIRECT;
/* We don't expect -EAGAIN */
pipe->f_flags &= ~O_NONBLOCK;
+}
+
+static inline int autofs_prepare_pipe(struct file *pipe)
+{
+ int ret = autofs_check_pipe(pipe);
+ if (ret < 0)
+ return ret;
+ autofs_set_packet_pipe_flags(pipe);
return 0;
}
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index d3f55e874338..b5e4dfa04ed0 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -7,16 +7,11 @@
#include <linux/init.h>
#include "autofs_i.h"
-static struct dentry *autofs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_nodev(fs_type, flags, data, autofs_fill_super);
-}
-
struct file_system_type autofs_fs_type = {
.owner = THIS_MODULE,
.name = "autofs",
- .mount = autofs_mount,
+ .init_fs_context = autofs_init_fs_context,
+ .parameters = autofs_param_specs,
.kill_sb = autofs_kill_sb,
};
MODULE_ALIAS_FS("autofs");
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 2b49662ed237..a5083d447a62 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -6,7 +6,6 @@
#include <linux/seq_file.h>
#include <linux/pagemap.h>
-#include <linux/parser.h>
#include "autofs_i.h"
@@ -110,189 +109,179 @@ static const struct super_operations autofs_sops = {
.evict_inode = autofs_evict_inode,
};
-enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto,
- Opt_indirect, Opt_direct, Opt_offset, Opt_strictexpire,
- Opt_ignore};
-
-static const match_table_t tokens = {
- {Opt_fd, "fd=%u"},
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_pgrp, "pgrp=%u"},
- {Opt_minproto, "minproto=%u"},
- {Opt_maxproto, "maxproto=%u"},
- {Opt_indirect, "indirect"},
- {Opt_direct, "direct"},
- {Opt_offset, "offset"},
- {Opt_strictexpire, "strictexpire"},
- {Opt_ignore, "ignore"},
- {Opt_err, NULL}
+enum {
+ Opt_direct,
+ Opt_fd,
+ Opt_gid,
+ Opt_ignore,
+ Opt_indirect,
+ Opt_maxproto,
+ Opt_minproto,
+ Opt_offset,
+ Opt_pgrp,
+ Opt_strictexpire,
+ Opt_uid,
};
-static int parse_options(char *options,
- struct inode *root, int *pgrp, bool *pgrp_set,
- struct autofs_sb_info *sbi)
+const struct fs_parameter_spec autofs_param_specs[] = {
+ fsparam_flag ("direct", Opt_direct),
+ fsparam_fd ("fd", Opt_fd),
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_flag ("ignore", Opt_ignore),
+ fsparam_flag ("indirect", Opt_indirect),
+ fsparam_u32 ("maxproto", Opt_maxproto),
+ fsparam_u32 ("minproto", Opt_minproto),
+ fsparam_flag ("offset", Opt_offset),
+ fsparam_u32 ("pgrp", Opt_pgrp),
+ fsparam_flag ("strictexpire", Opt_strictexpire),
+ fsparam_u32 ("uid", Opt_uid),
+ {}
+};
+
+struct autofs_fs_context {
+ kuid_t uid;
+ kgid_t gid;
+ int pgrp;
+ bool pgrp_set;
+};
+
+/*
+ * Open the fd. We do it here rather than in get_tree so that it's done in the
+ * context of the system call that passed the data and not the one that
+ * triggered the superblock creation, lest the fd get reassigned.
+ */
+static int autofs_parse_fd(struct fs_context *fc, struct autofs_sb_info *sbi,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- int pipefd = -1;
- kuid_t uid;
- kgid_t gid;
+ struct file *pipe;
+ int ret;
- root->i_uid = current_uid();
- root->i_gid = current_gid();
+ if (param->type == fs_value_is_file) {
+ /* came through the new api */
+ pipe = param->file;
+ param->file = NULL;
+ } else {
+ pipe = fget(result->uint_32);
+ }
+ if (!pipe) {
+ errorf(fc, "could not open pipe file descriptor");
+ return -EBADF;
+ }
- sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
- sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
+ ret = autofs_check_pipe(pipe);
+ if (ret < 0) {
+ errorf(fc, "Invalid/unusable pipe");
+ if (param->type != fs_value_is_file)
+ fput(pipe);
+ return -EBADF;
+ }
- sbi->pipefd = -1;
+ autofs_set_packet_pipe_flags(pipe);
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
-
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_fd:
- if (match_int(args, &pipefd))
- return 1;
- sbi->pipefd = pipefd;
- break;
- case Opt_uid:
- if (match_int(args, &option))
- return 1;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return 1;
- root->i_uid = uid;
- break;
- case Opt_gid:
- if (match_int(args, &option))
- return 1;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return 1;
- root->i_gid = gid;
- break;
- case Opt_pgrp:
- if (match_int(args, &option))
- return 1;
- *pgrp = option;
- *pgrp_set = true;
- break;
- case Opt_minproto:
- if (match_int(args, &option))
- return 1;
- sbi->min_proto = option;
- break;
- case Opt_maxproto:
- if (match_int(args, &option))
- return 1;
- sbi->max_proto = option;
- break;
- case Opt_indirect:
- set_autofs_type_indirect(&sbi->type);
- break;
- case Opt_direct:
- set_autofs_type_direct(&sbi->type);
- break;
- case Opt_offset:
- set_autofs_type_offset(&sbi->type);
- break;
- case Opt_strictexpire:
- sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
- break;
- case Opt_ignore:
- sbi->flags |= AUTOFS_SBI_IGNORE;
- break;
- default:
- return 1;
- }
+ if (sbi->pipe)
+ fput(sbi->pipe);
+
+ sbi->pipefd = result->uint_32;
+ sbi->pipe = pipe;
+
+ return 0;
+}
+
+static int autofs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+ struct fs_parse_result result;
+ kuid_t uid;
+ kgid_t gid;
+ int opt;
+
+ opt = fs_parse(fc, autofs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_fd:
+ return autofs_parse_fd(fc, sbi, param, &result);
+ case Opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return invalfc(fc, "Invalid uid");
+ ctx->uid = uid;
+ break;
+ case Opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return invalfc(fc, "Invalid gid");
+ ctx->gid = gid;
+ break;
+ case Opt_pgrp:
+ ctx->pgrp = result.uint_32;
+ ctx->pgrp_set = true;
+ break;
+ case Opt_minproto:
+ sbi->min_proto = result.uint_32;
+ break;
+ case Opt_maxproto:
+ sbi->max_proto = result.uint_32;
+ break;
+ case Opt_indirect:
+ set_autofs_type_indirect(&sbi->type);
+ break;
+ case Opt_direct:
+ set_autofs_type_direct(&sbi->type);
+ break;
+ case Opt_offset:
+ set_autofs_type_offset(&sbi->type);
+ break;
+ case Opt_strictexpire:
+ sbi->flags |= AUTOFS_SBI_STRICTEXPIRE;
+ break;
+ case Opt_ignore:
+ sbi->flags |= AUTOFS_SBI_IGNORE;
}
- return (sbi->pipefd < 0);
+
+ return 0;
}
-int autofs_fill_super(struct super_block *s, void *data, int silent)
+static struct autofs_sb_info *autofs_alloc_sbi(void)
{
- struct inode *root_inode;
- struct dentry *root;
- struct file *pipe;
struct autofs_sb_info *sbi;
- struct autofs_info *ino;
- int pgrp = 0;
- bool pgrp_set = false;
- int ret = -EINVAL;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- return -ENOMEM;
- pr_debug("starting up, sbi = %p\n", sbi);
+ return NULL;
- s->s_fs_info = sbi;
sbi->magic = AUTOFS_SBI_MAGIC;
- sbi->pipefd = -1;
- sbi->pipe = NULL;
- sbi->exp_timeout = 0;
- sbi->oz_pgrp = NULL;
- sbi->sb = s;
- sbi->version = 0;
- sbi->sub_version = 0;
sbi->flags = AUTOFS_SBI_CATATONIC;
+ sbi->min_proto = AUTOFS_MIN_PROTO_VERSION;
+ sbi->max_proto = AUTOFS_MAX_PROTO_VERSION;
+ sbi->pipefd = -1;
+
set_autofs_type_indirect(&sbi->type);
- sbi->min_proto = 0;
- sbi->max_proto = 0;
mutex_init(&sbi->wq_mutex);
mutex_init(&sbi->pipe_mutex);
spin_lock_init(&sbi->fs_lock);
- sbi->queues = NULL;
spin_lock_init(&sbi->lookup_lock);
INIT_LIST_HEAD(&sbi->active_list);
INIT_LIST_HEAD(&sbi->expiring_list);
- s->s_blocksize = 1024;
- s->s_blocksize_bits = 10;
- s->s_magic = AUTOFS_SUPER_MAGIC;
- s->s_op = &autofs_sops;
- s->s_d_op = &autofs_dentry_operations;
- s->s_time_gran = 1;
- /*
- * Get the root inode and dentry, but defer checking for errors.
- */
- ino = autofs_new_ino(sbi);
- if (!ino) {
- ret = -ENOMEM;
- goto fail_free;
- }
- root_inode = autofs_get_inode(s, S_IFDIR | 0755);
- root = d_make_root(root_inode);
- if (!root) {
- ret = -ENOMEM;
- goto fail_ino;
- }
- pipe = NULL;
-
- root->d_fsdata = ino;
+ return sbi;
+}
- /* Can this call block? */
- if (parse_options(data, root_inode, &pgrp, &pgrp_set, sbi)) {
- pr_err("called with bogus options\n");
- goto fail_dput;
- }
+static int autofs_validate_protocol(struct fs_context *fc)
+{
+ struct autofs_sb_info *sbi = fc->s_fs_info;
/* Test versions first */
if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
- pr_err("kernel does not match daemon version "
+ errorf(fc, "kernel does not match daemon version "
"daemon (%d, %d) kernel (%d, %d)\n",
sbi->min_proto, sbi->max_proto,
AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
- goto fail_dput;
+ return -EINVAL;
}
/* Establish highest kernel protocol version */
@@ -300,13 +289,62 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
sbi->version = AUTOFS_MAX_PROTO_VERSION;
else
sbi->version = sbi->max_proto;
- sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
- if (pgrp_set) {
- sbi->oz_pgrp = find_get_pid(pgrp);
+ switch (sbi->version) {
+ case 4:
+ sbi->sub_version = 7;
+ break;
+ case 5:
+ sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+ break;
+ default:
+ sbi->sub_version = 0;
+ }
+
+ return 0;
+}
+
+static int autofs_fill_super(struct super_block *s, struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = s->s_fs_info;
+ struct inode *root_inode;
+ struct dentry *root;
+ struct autofs_info *ino;
+ int ret = -ENOMEM;
+
+ pr_debug("starting up, sbi = %p\n", sbi);
+
+ sbi->sb = s;
+ s->s_blocksize = 1024;
+ s->s_blocksize_bits = 10;
+ s->s_magic = AUTOFS_SUPER_MAGIC;
+ s->s_op = &autofs_sops;
+ s->s_d_op = &autofs_dentry_operations;
+ s->s_time_gran = 1;
+
+ /*
+ * Get the root inode and dentry, but defer checking for errors.
+ */
+ ino = autofs_new_ino(sbi);
+ if (!ino)
+ goto fail;
+
+ root_inode = autofs_get_inode(s, S_IFDIR | 0755);
+ if (!root_inode)
+	goto fail_ino;
+ root_inode->i_uid = ctx->uid;
+ root_inode->i_gid = ctx->gid;
+
+ root = d_make_root(root_inode);
+ if (!root)
+ goto fail_ino;
+
+ root->d_fsdata = ino;
+
+ if (ctx->pgrp_set) {
+ sbi->oz_pgrp = find_get_pid(ctx->pgrp);
if (!sbi->oz_pgrp) {
- pr_err("could not find process group %d\n",
- pgrp);
+ ret = invalf(fc, "Could not find process group %d",
+ ctx->pgrp);
goto fail_dput;
}
} else {
@@ -321,16 +359,7 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
pr_debug("pipe fd = %d, pgrp = %u\n",
sbi->pipefd, pid_nr(sbi->oz_pgrp));
- pipe = fget(sbi->pipefd);
- if (!pipe) {
- pr_err("could not open pipe file descriptor\n");
- goto fail_put_pid;
- }
- ret = autofs_prepare_pipe(pipe);
- if (ret < 0)
- goto fail_fput;
- sbi->pipe = pipe;
sbi->flags &= ~AUTOFS_SBI_CATATONIC;
/*
@@ -342,22 +371,82 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
/*
* Failure ... clean up.
*/
-fail_fput:
- pr_err("pipe file descriptor does not contain proper ops\n");
- fput(pipe);
-fail_put_pid:
- put_pid(sbi->oz_pgrp);
fail_dput:
dput(root);
- goto fail_free;
+ goto fail;
fail_ino:
autofs_free_ino(ino);
-fail_free:
- kfree(sbi);
- s->s_fs_info = NULL;
+fail:
return ret;
}
+/*
+ * Validate the parameters and then request a superblock.
+ */
+static int autofs_get_tree(struct fs_context *fc)
+{
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+ int ret;
+
+ ret = autofs_validate_protocol(fc);
+ if (ret)
+ return ret;
+
+ if (sbi->pipefd < 0)
+ return invalf(fc, "No control pipe specified");
+
+ return get_tree_nodev(fc, autofs_fill_super);
+}
+
+static void autofs_free_fc(struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx = fc->fs_private;
+ struct autofs_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) {
+ if (sbi->pipe)
+ fput(sbi->pipe);
+ kfree(sbi);
+ }
+ kfree(ctx);
+}
+
+static const struct fs_context_operations autofs_context_ops = {
+ .free = autofs_free_fc,
+ .parse_param = autofs_parse_param,
+ .get_tree = autofs_get_tree,
+};
+
+/*
+ * Set up the filesystem mount context.
+ */
+int autofs_init_fs_context(struct fs_context *fc)
+{
+ struct autofs_fs_context *ctx;
+ struct autofs_sb_info *sbi;
+
+ ctx = kzalloc(sizeof(struct autofs_fs_context), GFP_KERNEL);
+ if (!ctx)
+ goto nomem;
+
+ ctx->uid = current_uid();
+ ctx->gid = current_gid();
+
+ sbi = autofs_alloc_sbi();
+ if (!sbi)
+ goto nomem_ctx;
+
+ fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
+ fc->ops = &autofs_context_ops;
+ return 0;
+
+nomem_ctx:
+ kfree(ctx);
+nomem:
+ return -ENOMEM;
+}
+
struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
{
struct inode *inode = new_inode(sb);
@@ -370,7 +459,7 @@ struct inode *autofs_get_inode(struct super_block *sb, umode_t mode)
inode->i_uid = d_inode(sb->s_root)->i_uid;
inode->i_gid = d_inode(sb->s_root)->i_gid;
}
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_ino = get_next_ino();
if (S_ISDIR(mode)) {
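The autofs conversion has the standard new-mount-API shape: init_fs_context allocates per-mount state, parse_param consumes one fs_parameter at a time, and get_tree_nodev() drives fill_super. A pared-down skeleton of that shape, using hypothetical "examplefs" names with error handling trimmed:

    #include <linux/fs_context.h>
    #include <linux/fs_parser.h>
    #include <linux/slab.h>

    enum { Opt_mode };

    static const struct fs_parameter_spec examplefs_param_specs[] = {
    	fsparam_u32("mode", Opt_mode),
    	{}
    };

    struct examplefs_ctx { u32 mode; };

    static int examplefs_parse_param(struct fs_context *fc,
    				 struct fs_parameter *param)
    {
    	struct examplefs_ctx *ctx = fc->fs_private;
    	struct fs_parse_result result;
    	int opt = fs_parse(fc, examplefs_param_specs, param, &result);

    	if (opt < 0)
    		return opt;
    	if (opt == Opt_mode)
    		ctx->mode = result.uint_32;
    	return 0;
    }

    static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
    {
    	return 0;	/* set s_op, allocate the root dentry, ... */
    }

    static int examplefs_get_tree(struct fs_context *fc)
    {
    	return get_tree_nodev(fc, examplefs_fill_super);
    }

    static void examplefs_free_fc(struct fs_context *fc)
    {
    	kfree(fc->fs_private);
    }

    static const struct fs_context_operations examplefs_ctx_ops = {
    	.free		= examplefs_free_fc,
    	.parse_param	= examplefs_parse_param,
    	.get_tree	= examplefs_get_tree,
    };

    int examplefs_init_fs_context(struct fs_context *fc)
    {
    	fc->fs_private = kzalloc(sizeof(struct examplefs_ctx), GFP_KERNEL);
    	if (!fc->fs_private)
    		return -ENOMEM;
    	fc->ops = &examplefs_ctx_ops;
    	return 0;
    }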
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 512b9a26c63d..530d18827e35 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -600,7 +600,7 @@ static int autofs_dir_symlink(struct mnt_idmap *idmap,
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return 0;
}
@@ -633,7 +633,7 @@ static int autofs_dir_unlink(struct inode *dir, struct dentry *dentry)
d_inode(dentry)->i_size = 0;
clear_nlink(d_inode(dentry));
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
spin_lock(&sbi->lookup_lock);
__autofs_add_expiring(dentry);
@@ -749,7 +749,7 @@ static int autofs_dir_mkdir(struct mnt_idmap *idmap,
p_ino = autofs_dentry_ino(dentry->d_parent);
p_ino->count++;
inc_nlink(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return 0;
}
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 83f9566c973b..316d88da2ce1 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -208,7 +208,7 @@ void make_bad_inode(struct inode *inode)
remove_inode_hash(inode);
inode->i_mode = S_IFREG;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &bad_inode_ops;
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &bad_file_ops;
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
new file mode 100644
index 000000000000..df13a4f9a6e3
--- /dev/null
+++ b/fs/bcachefs/Kconfig
@@ -0,0 +1,85 @@
+
+config BCACHEFS_FS
+ tristate "bcachefs filesystem support (EXPERIMENTAL)"
+ depends on BLOCK
+ select EXPORTFS
+ select CLOSURES
+ select LIBCRC32C
+ select CRC64
+ select FS_POSIX_ACL
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ select LZ4HC_COMPRESS
+ select LZ4HC_DECOMPRESS
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
+ select CRYPTO_SHA256
+ select CRYPTO_CHACHA20
+ select CRYPTO_POLY1305
+ select KEYS
+ select RAID6_PQ
+ select XOR_BLOCKS
+ select XXHASH
+ select SRCU
+ select SYMBOLIC_ERRNAME
+ select MEAN_AND_VARIANCE
+ help
+ The bcachefs filesystem - a modern, copy-on-write filesystem with
+ support for multiple devices, compression, checksumming, etc.
+
+config BCACHEFS_QUOTA
+ bool "bcachefs quota support"
+ depends on BCACHEFS_FS
+ select QUOTACTL
+
+config BCACHEFS_POSIX_ACL
+ bool "bcachefs POSIX ACL support"
+ depends on BCACHEFS_FS
+ select FS_POSIX_ACL
+
+config BCACHEFS_DEBUG_TRANSACTIONS
+ bool "bcachefs runtime info"
+ depends on BCACHEFS_FS
+ default y
+ help
+ This makes the list of running btree transactions available in debugfs.
+
+ This is a highly useful debugging feature but does add a small amount of overhead.
+
+config BCACHEFS_DEBUG
+ bool "bcachefs debugging"
+ depends on BCACHEFS_FS
+ help
+ Enables many extra debugging checks and assertions.
+
+ The resulting code will be significantly slower than normal; you
+ probably shouldn't select this option unless you're a developer.
+
+config BCACHEFS_TESTS
+ bool "bcachefs unit and performance tests"
+ depends on BCACHEFS_FS
+ help
+ Include some unit and performance tests for the core btree code.
+
+config BCACHEFS_LOCK_TIME_STATS
+ bool "bcachefs lock time statistics"
+ depends on BCACHEFS_FS
+ help
+ Expose statistics in debugfs for how long each lock was held.
+
+config BCACHEFS_NO_LATENCY_ACCT
+ bool "disable latency accounting and time stats"
+ depends on BCACHEFS_FS
+ help
+ This disables device latency tracking and time stats; intended only
+ for performance testing.
+
+config MEAN_AND_VARIANCE_UNIT_TEST
+ tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select MEAN_AND_VARIANCE
+ default KUNIT_ALL_TESTS
+ help
+ This option enables the kunit tests for mean_and_variance module.
+ If unsure, say N.
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
new file mode 100644
index 000000000000..0749731b9072
--- /dev/null
+++ b/fs/bcachefs/Makefile
@@ -0,0 +1,88 @@
+
+obj-$(CONFIG_BCACHEFS_FS) += bcachefs.o
+
+bcachefs-y := \
+ acl.o \
+ alloc_background.o \
+ alloc_foreground.o \
+ backpointers.o \
+ bkey.o \
+ bkey_methods.o \
+ bkey_sort.o \
+ bset.o \
+ btree_cache.o \
+ btree_gc.o \
+ btree_io.o \
+ btree_iter.o \
+ btree_journal_iter.o \
+ btree_key_cache.o \
+ btree_locking.o \
+ btree_trans_commit.o \
+ btree_update.o \
+ btree_update_interior.o \
+ btree_write_buffer.o \
+ buckets.o \
+ buckets_waiting_for_journal.o \
+ chardev.o \
+ checksum.o \
+ clock.o \
+ compress.o \
+ counters.o \
+ debug.o \
+ dirent.o \
+ disk_groups.o \
+ data_update.o \
+ ec.o \
+ errcode.o \
+ error.o \
+ extents.o \
+ extent_update.o \
+ fs.o \
+ fs-common.o \
+ fs-ioctl.o \
+ fs-io.o \
+ fs-io-buffered.o \
+ fs-io-direct.o \
+ fs-io-pagecache.o \
+ fsck.o \
+ inode.o \
+ io_read.o \
+ io_misc.o \
+ io_write.o \
+ journal.o \
+ journal_io.o \
+ journal_reclaim.o \
+ journal_sb.o \
+ journal_seq_blacklist.o \
+ keylist.o \
+ logged_ops.o \
+ lru.o \
+ mean_and_variance.o \
+ migrate.o \
+ move.o \
+ movinggc.o \
+ nocow_locking.o \
+ opts.o \
+ printbuf.o \
+ quota.o \
+ rebalance.o \
+ recovery.o \
+ reflink.o \
+ replicas.o \
+ sb-clean.o \
+ sb-members.o \
+ siphash.o \
+ six.o \
+ snapshot.o \
+ subvolume.o \
+ super.o \
+ super-io.o \
+ sysfs.o \
+ tests.o \
+ trace.o \
+ two_state_shared_lock.o \
+ util.o \
+ varint.o \
+ xattr.o
+
+obj-$(CONFIG_MEAN_AND_VARIANCE_UNIT_TEST) += mean_and_variance_test.o
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
new file mode 100644
index 000000000000..f3809897f00a
--- /dev/null
+++ b/fs/bcachefs/acl.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+
+#include "acl.h"
+#include "xattr.h"
+
+#include <linux/posix_acl.h>
+
+static const char * const acl_types[] = {
+ [ACL_USER_OBJ] = "user_obj",
+ [ACL_USER] = "user",
+ [ACL_GROUP_OBJ] = "group_obj",
+ [ACL_GROUP] = "group",
+ [ACL_MASK] = "mask",
+ [ACL_OTHER] = "other",
+ NULL,
+};
+
+void bch2_acl_to_text(struct printbuf *out, const void *value, size_t size)
+{
+ const void *p, *end = value + size;
+
+ if (!value ||
+ size < sizeof(bch_acl_header) ||
+ ((bch_acl_header *)value)->a_version != cpu_to_le32(BCH_ACL_VERSION))
+ return;
+
+ p = value + sizeof(bch_acl_header);
+ while (p < end) {
+ const bch_acl_entry *in = p;
+ unsigned tag = le16_to_cpu(in->e_tag);
+
+ prt_str(out, acl_types[tag]);
+
+ switch (tag) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ p += sizeof(bch_acl_entry_short);
+ break;
+ case ACL_USER:
+ prt_printf(out, " uid %u", le32_to_cpu(in->e_id));
+ p += sizeof(bch_acl_entry);
+ break;
+ case ACL_GROUP:
+ prt_printf(out, " gid %u", le32_to_cpu(in->e_id));
+ p += sizeof(bch_acl_entry);
+ break;
+ }
+
+ prt_printf(out, " %o", le16_to_cpu(in->e_perm));
+
+ if (p != end)
+ prt_char(out, ' ');
+ }
+}
+
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+
+#include "fs.h"
+
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+static inline size_t bch2_acl_size(unsigned nr_short, unsigned nr_long)
+{
+ return sizeof(bch_acl_header) +
+ sizeof(bch_acl_entry_short) * nr_short +
+ sizeof(bch_acl_entry) * nr_long;
+}
+
+static inline int acl_to_xattr_type(int type)
+{
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ return KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS;
+ case ACL_TYPE_DEFAULT:
+ return KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Convert from filesystem to in-memory representation.
+ */
+static struct posix_acl *bch2_acl_from_disk(struct btree_trans *trans,
+ const void *value, size_t size)
+{
+ const void *p, *end = value + size;
+ struct posix_acl *acl;
+ struct posix_acl_entry *out;
+ unsigned count = 0;
+ int ret;
+
+ if (!value)
+ return NULL;
+ if (size < sizeof(bch_acl_header))
+ goto invalid;
+ if (((bch_acl_header *)value)->a_version !=
+ cpu_to_le32(BCH_ACL_VERSION))
+ goto invalid;
+
+ p = value + sizeof(bch_acl_header);
+ while (p < end) {
+ const bch_acl_entry *entry = p;
+
+ if (p + sizeof(bch_acl_entry_short) > end)
+ goto invalid;
+
+ switch (le16_to_cpu(entry->e_tag)) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ p += sizeof(bch_acl_entry_short);
+ break;
+ case ACL_USER:
+ case ACL_GROUP:
+ p += sizeof(bch_acl_entry);
+ break;
+ default:
+ goto invalid;
+ }
+
+ count++;
+ }
+
+ if (p > end)
+ goto invalid;
+
+ if (!count)
+ return NULL;
+
+ acl = allocate_dropping_locks(trans, ret,
+ posix_acl_alloc(count, _gfp));
+ if (!acl)
+ return ERR_PTR(-ENOMEM);
+ if (ret) {
+ kfree(acl);
+ return ERR_PTR(ret);
+ }
+
+ out = acl->a_entries;
+
+ p = value + sizeof(bch_acl_header);
+ while (p < end) {
+ const bch_acl_entry *in = p;
+
+ out->e_tag = le16_to_cpu(in->e_tag);
+ out->e_perm = le16_to_cpu(in->e_perm);
+
+ switch (out->e_tag) {
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ p += sizeof(bch_acl_entry_short);
+ break;
+ case ACL_USER:
+ out->e_uid = make_kuid(&init_user_ns,
+ le32_to_cpu(in->e_id));
+ p += sizeof(bch_acl_entry);
+ break;
+ case ACL_GROUP:
+ out->e_gid = make_kgid(&init_user_ns,
+ le32_to_cpu(in->e_id));
+ p += sizeof(bch_acl_entry);
+ break;
+ }
+
+ out++;
+ }
+
+ BUG_ON(out != acl->a_entries + acl->a_count);
+
+ return acl;
+invalid:
+ pr_err("invalid acl entry");
+ return ERR_PTR(-EINVAL);
+}
+
+#define acl_for_each_entry(acl, acl_e) \
+ for (acl_e = acl->a_entries; \
+ acl_e < acl->a_entries + acl->a_count; \
+ acl_e++)
+
+/*
+ * Convert from in-memory to filesystem representation.
+ */
+static struct bkey_i_xattr *
+bch2_acl_to_xattr(struct btree_trans *trans,
+ const struct posix_acl *acl,
+ int type)
+{
+ struct bkey_i_xattr *xattr;
+ bch_acl_header *acl_header;
+ const struct posix_acl_entry *acl_e;
+ void *outptr;
+ unsigned nr_short = 0, nr_long = 0, acl_len, u64s;
+
+ acl_for_each_entry(acl, acl_e) {
+ switch (acl_e->e_tag) {
+ case ACL_USER:
+ case ACL_GROUP:
+ nr_long++;
+ break;
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ nr_short++;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ acl_len = bch2_acl_size(nr_short, nr_long);
+ u64s = BKEY_U64s + xattr_val_u64s(0, acl_len);
+
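+	/* a bkey's u64s field is a u8, so the whole key must fit in U8_MAX u64s: */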
+ if (u64s > U8_MAX)
+ return ERR_PTR(-E2BIG);
+
+ xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
+ if (IS_ERR(xattr))
+ return xattr;
+
+ bkey_xattr_init(&xattr->k_i);
+ xattr->k.u64s = u64s;
+ xattr->v.x_type = acl_to_xattr_type(type);
+ xattr->v.x_name_len = 0;
+ xattr->v.x_val_len = cpu_to_le16(acl_len);
+
+ acl_header = xattr_val(&xattr->v);
+ acl_header->a_version = cpu_to_le32(BCH_ACL_VERSION);
+
+ outptr = (void *) acl_header + sizeof(*acl_header);
+
+ acl_for_each_entry(acl, acl_e) {
+ bch_acl_entry *entry = outptr;
+
+ entry->e_tag = cpu_to_le16(acl_e->e_tag);
+ entry->e_perm = cpu_to_le16(acl_e->e_perm);
+ switch (acl_e->e_tag) {
+ case ACL_USER:
+ entry->e_id = cpu_to_le32(
+ from_kuid(&init_user_ns, acl_e->e_uid));
+ outptr += sizeof(bch_acl_entry);
+ break;
+ case ACL_GROUP:
+ entry->e_id = cpu_to_le32(
+ from_kgid(&init_user_ns, acl_e->e_gid));
+ outptr += sizeof(bch_acl_entry);
+ break;
+
+ case ACL_USER_OBJ:
+ case ACL_GROUP_OBJ:
+ case ACL_MASK:
+ case ACL_OTHER:
+ outptr += sizeof(bch_acl_entry_short);
+ break;
+ }
+ }
+
+ BUG_ON(outptr != xattr_val(&xattr->v) + acl_len);
+
+ return xattr;
+}
+
+struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type)
+{
+ struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
+ struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c_xattr xattr;
+ struct posix_acl *acl = NULL;
+ struct bkey_s_c k;
+ int ret;
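+
+	/*
+	 * The lookup runs inside a btree transaction: on a transaction restart
+	 * we jump back here and redo it from the top.
+	 */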
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
+ &hash, inode_inum(inode), &search, 0);
+ if (ret) {
+ if (!bch2_err_matches(ret, ENOENT))
+ acl = ERR_PTR(ret);
+ goto out;
+ }
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret) {
+ acl = ERR_PTR(ret);
+ goto out;
+ }
+
+ xattr = bkey_s_c_to_xattr(k);
+ acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
+ le16_to_cpu(xattr.v->x_val_len));
+
+ if (!IS_ERR(acl))
+ set_cached_acl(&inode->v, type, acl);
+out:
+ if (bch2_err_matches(PTR_ERR_OR_ZERO(acl), BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return acl;
+}
+
+int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
+ struct bch_inode_unpacked *inode_u,
+ struct posix_acl *acl, int type)
+{
+ struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
+ int ret;
+
+ if (type == ACL_TYPE_DEFAULT &&
+ !S_ISDIR(inode_u->bi_mode))
+ return acl ? -EACCES : 0;
+
+ if (acl) {
+ struct bkey_i_xattr *xattr =
+ bch2_acl_to_xattr(trans, acl, type);
+ if (IS_ERR(xattr))
+ return PTR_ERR(xattr);
+
+ ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
+ inum, &xattr->k_i, 0);
+ } else {
+ struct xattr_search_key search =
+ X_SEARCH(acl_to_xattr_type(type), "", 0);
+
+ ret = bch2_hash_delete(trans, bch2_xattr_hash_desc, &hash_info,
+ inum, &search);
+ }
+
+ return bch2_err_matches(ret, ENOENT) ? 0 : ret;
+}
+
+int bch2_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry,
+ struct posix_acl *_acl, int type)
+{
+ struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter inode_iter = { NULL };
+ struct bch_inode_unpacked inode_u;
+ struct posix_acl *acl;
+ umode_t mode;
+ int ret;
+
+ mutex_lock(&inode->ei_update_lock);
+retry:
+ bch2_trans_begin(trans);
+ acl = _acl;
+
+ ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto btree_err;
+
+ mode = inode_u.bi_mode;
+
+ if (type == ACL_TYPE_ACCESS) {
+ ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
+ if (ret)
+ goto btree_err;
+ }
+
+ ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type);
+ if (ret)
+ goto btree_err;
+
+ inode_u.bi_ctime = bch2_current_time(c);
+ inode_u.bi_mode = mode;
+
+ ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
+ bch2_trans_commit(trans, NULL, NULL, 0);
+btree_err:
+ bch2_trans_iter_exit(trans, &inode_iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ if (unlikely(ret))
+ goto err;
+
+ bch2_inode_update_after_write(trans, inode, &inode_u,
+ ATTR_CTIME|ATTR_MODE);
+
+ set_cached_acl(&inode->v, type, acl);
+err:
+ mutex_unlock(&inode->ei_update_lock);
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
+ struct bch_inode_unpacked *inode,
+ umode_t mode,
+ struct posix_acl **new_acl)
+{
+ struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
+ struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
+ struct btree_iter iter;
+ struct bkey_s_c_xattr xattr;
+ struct bkey_i_xattr *new;
+ struct posix_acl *acl = NULL;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
+ &hash_info, inum, &search, BTREE_ITER_INTENT);
+ if (ret)
+ return bch2_err_matches(ret, ENOENT) ? 0 : ret;
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+ xattr = bkey_s_c_to_xattr(k);
+
+ acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
+ le16_to_cpu(xattr.v->x_val_len));
+ ret = PTR_ERR_OR_ZERO(acl);
+ if (IS_ERR_OR_NULL(acl))
+ goto err;
+
+ ret = allocate_dropping_locks_errcode(trans,
+ __posix_acl_chmod(&acl, _gfp, mode));
+ if (ret)
+ goto err;
+
+ new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
+ if (IS_ERR(new)) {
+ ret = PTR_ERR(new);
+ goto err;
+ }
+
+ new->k.p = iter.pos;
+ ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
+ *new_acl = acl;
+ acl = NULL;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ if (!IS_ERR_OR_NULL(acl))
+ kfree(acl);
+ return ret;
+}
+
+#endif /* CONFIG_BCACHEFS_POSIX_ACL */
diff --git a/fs/bcachefs/acl.h b/fs/bcachefs/acl.h
new file mode 100644
index 000000000000..27e7eec0f278
--- /dev/null
+++ b/fs/bcachefs/acl.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ACL_H
+#define _BCACHEFS_ACL_H
+
+struct bch_inode_unpacked;
+struct bch_hash_info;
+struct bch_inode_info;
+struct posix_acl;
+
+#define BCH_ACL_VERSION 0x0001
+
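+/*
+ * On-disk ACL layout: a bch_acl_header followed by a packed array of entries.
+ * Named ACL_USER/ACL_GROUP entries use the full form, which carries an e_id;
+ * the other tags use the short form. All fields are little endian.
+ */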
+typedef struct {
+ __le16 e_tag;
+ __le16 e_perm;
+ __le32 e_id;
+} bch_acl_entry;
+
+typedef struct {
+ __le16 e_tag;
+ __le16 e_perm;
+} bch_acl_entry_short;
+
+typedef struct {
+ __le32 a_version;
+} bch_acl_header;
+
+void bch2_acl_to_text(struct printbuf *, const void *, size_t);
+
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+
+struct posix_acl *bch2_get_acl(struct mnt_idmap *, struct dentry *, int);
+
+int bch2_set_acl_trans(struct btree_trans *, subvol_inum,
+ struct bch_inode_unpacked *,
+ struct posix_acl *, int);
+int bch2_set_acl(struct mnt_idmap *, struct dentry *, struct posix_acl *, int);
+int bch2_acl_chmod(struct btree_trans *, subvol_inum,
+ struct bch_inode_unpacked *,
+ umode_t, struct posix_acl **);
+
+#else
+
+static inline int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
+ struct bch_inode_unpacked *inode_u,
+ struct posix_acl *acl, int type)
+{
+ return 0;
+}
+
+static inline int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
+ struct bch_inode_unpacked *inode,
+ umode_t mode,
+ struct posix_acl **new_acl)
+{
+ return 0;
+}
+
+#endif /* CONFIG_BCACHEFS_POSIX_ACL */
+
+#endif /* _BCACHEFS_ACL_H */
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
new file mode 100644
index 000000000000..2d516207e223
--- /dev/null
+++ b/fs/bcachefs/alloc_background.c
@@ -0,0 +1,2146 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "backpointers.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_key_cache.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_gc.h"
+#include "btree_write_buffer.h"
+#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
+#include "clock.h"
+#include "debug.h"
+#include "ec.h"
+#include "error.h"
+#include "lru.h"
+#include "recovery.h"
+#include "trace.h"
+#include "varint.h"
+
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/task.h>
+#include <linux/sort.h>
+
+/* Persistent alloc info: */
+
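+/*
+ * v1 alloc keys store a bitmap of present fields (bch_alloc.fields); each
+ * present field is a fixed-width little-endian value, packed in declaration
+ * order with the widths given by this table:
+ */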
+static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
+#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
+struct bkey_alloc_unpacked {
+ u64 journal_seq;
+ u8 gen;
+ u8 oldest_gen;
+ u8 data_type;
+ bool need_discard:1;
+ bool need_inc_gen:1;
+#define x(_name, _bits) u##_bits _name;
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+};
+
+static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
+ const void **p, unsigned field)
+{
+ unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
+ u64 v;
+
+ if (!(a->fields & (1 << field)))
+ return 0;
+
+ switch (bytes) {
+ case 1:
+ v = *((const u8 *) *p);
+ break;
+ case 2:
+ v = le16_to_cpup(*p);
+ break;
+ case 4:
+ v = le32_to_cpup(*p);
+ break;
+ case 8:
+ v = le64_to_cpup(*p);
+ break;
+ default:
+ BUG();
+ }
+
+ *p += bytes;
+ return v;
+}
+
+static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
+{
+ const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
+ const void *d = in->data;
+ unsigned idx = 0;
+
+ out->gen = in->gen;
+
+#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+}
+
+static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
+ const u8 *in = a.v->data;
+ const u8 *end = bkey_val_end(a);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v;
+
+ out->gen = a.v->gen;
+ out->oldest_gen = a.v->oldest_gen;
+ out->data_type = a.v->data_type;
+
+#define x(_name, _bits) \
+ if (fieldnr < a.v->nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v = 0; \
+ } \
+ out->_name = v; \
+ if (v != out->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+ return 0;
+}
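+
+/*
+ * Note: in the unpack loop above, "v != out->_name" catches a decoded value
+ * that doesn't fit in the destination's declared bit width.
+ */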
+
+static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
+ const u8 *in = a.v->data;
+ const u8 *end = bkey_val_end(a);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v;
+
+ out->gen = a.v->gen;
+ out->oldest_gen = a.v->oldest_gen;
+ out->data_type = a.v->data_type;
+ out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
+ out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
+ out->journal_seq = le64_to_cpu(a.v->journal_seq);
+
+#define x(_name, _bits) \
+ if (fieldnr < a.v->nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v = 0; \
+ } \
+ out->_name = v; \
+ if (v != out->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_ALLOC_FIELDS_V2()
+#undef x
+ return 0;
+}
+
+static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
+{
+ struct bkey_alloc_unpacked ret = { .gen = 0 };
+
+ switch (k.k->type) {
+ case KEY_TYPE_alloc:
+ bch2_alloc_unpack_v1(&ret, k);
+ break;
+ case KEY_TYPE_alloc_v2:
+ bch2_alloc_unpack_v2(&ret, k);
+ break;
+ case KEY_TYPE_alloc_v3:
+ bch2_alloc_unpack_v3(&ret, k);
+ break;
+ }
+
+ return ret;
+}
+
+static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
+{
+ unsigned i, bytes = offsetof(struct bch_alloc, data);
+
+ for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
+ if (a->fields & (1 << i))
+ bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
+
+ return DIV_ROUND_UP(bytes, sizeof(u64));
+}
+
+int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
+
+ /* allow for unknown fields */
+ if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
+ prt_printf(err, "incorrect value size (%zu < %u)",
+ bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_alloc_unpacked u;
+
+ if (bch2_alloc_unpack_v2(&u, k)) {
+ prt_printf(err, "unpack error");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_alloc_unpacked u;
+
+ if (bch2_alloc_unpack_v3(&u, k)) {
+ prt_printf(err, "unpack error");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+ struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+
+ if (alloc_v4_u64s(a.v) > bkey_val_u64s(k.k)) {
+ prt_printf(err, "bad val size (%u > %zu)",
+ alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
+ prt_printf(err, "invalid backpointers_start");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
+ prt_printf(err, "invalid data type (got %u should be %u)",
+ a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ switch (a.v->data_type) {
+ case BCH_DATA_free:
+ case BCH_DATA_need_gc_gens:
+ case BCH_DATA_need_discard:
+ if (a.v->dirty_sectors ||
+ a.v->cached_sectors ||
+ a.v->stripe) {
+			prt_printf(err, "empty data type, but bucket has data");
+ return -BCH_ERR_invalid_bkey;
+ }
+ break;
+ case BCH_DATA_sb:
+ case BCH_DATA_journal:
+ case BCH_DATA_btree:
+ case BCH_DATA_user:
+ case BCH_DATA_parity:
+ if (!a.v->dirty_sectors) {
+ prt_printf(err, "data_type %s but dirty_sectors==0",
+ bch2_data_types[a.v->data_type]);
+ return -BCH_ERR_invalid_bkey;
+ }
+ break;
+ case BCH_DATA_cached:
+ if (!a.v->cached_sectors ||
+ a.v->dirty_sectors ||
+ a.v->stripe) {
+ prt_printf(err, "data type inconsistency");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (!a.v->io_time[READ] &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs) {
+ prt_printf(err, "cached bucket with read_time == 0");
+ return -BCH_ERR_invalid_bkey;
+ }
+ break;
+ case BCH_DATA_stripe:
+ break;
+ }
+
+ return 0;
+}
+
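+/* byte-swap a 40-bit value, e.g. swab40(0x1122334455) == 0x5544332211: */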
+static inline u64 swab40(u64 x)
+{
+ return (((x & 0x00000000ffULL) << 32)|
+ ((x & 0x000000ff00ULL) << 16)|
+ ((x & 0x0000ff0000ULL) >> 0)|
+ ((x & 0x00ff000000ULL) >> 16)|
+ ((x & 0xff00000000ULL) >> 32));
+}
+
+void bch2_alloc_v4_swab(struct bkey_s k)
+{
+ struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
+ struct bch_backpointer *bp, *bps;
+
+ a->journal_seq = swab64(a->journal_seq);
+ a->flags = swab32(a->flags);
+ a->dirty_sectors = swab32(a->dirty_sectors);
+ a->cached_sectors = swab32(a->cached_sectors);
+ a->io_time[0] = swab64(a->io_time[0]);
+ a->io_time[1] = swab64(a->io_time[1]);
+ a->stripe = swab32(a->stripe);
+ a->nr_external_backpointers = swab32(a->nr_external_backpointers);
+
+ bps = alloc_v4_backpointers(a);
+ for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
+ bp->bucket_offset = swab40(bp->bucket_offset);
+ bp->bucket_len = swab32(bp->bucket_len);
+ bch2_bpos_swab(&bp->pos);
+ }
+}
+
+void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
+ unsigned i;
+
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "gen %u oldest_gen %u data_type %s",
+ a->gen, a->oldest_gen,
+ a->data_type < BCH_DATA_NR
+ ? bch2_data_types[a->data_type]
+ : "(invalid data type)");
+ prt_newline(out);
+ prt_printf(out, "journal_seq %llu", a->journal_seq);
+ prt_newline(out);
+ prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
+ prt_newline(out);
+ prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
+ prt_newline(out);
+ prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
+ prt_newline(out);
+ prt_printf(out, "cached_sectors %u", a->cached_sectors);
+ prt_newline(out);
+ prt_printf(out, "stripe %u", a->stripe);
+ prt_newline(out);
+ prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
+ prt_newline(out);
+ prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
+ prt_newline(out);
+ prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
+ prt_newline(out);
+ prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
+ prt_newline(out);
+ prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+ prt_newline(out);
+
+ if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
+ struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
+ const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
+
+ prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
+ printbuf_indent_add(out, 2);
+
+ for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
+ prt_newline(out);
+ bch2_backpointer_to_text(out, &bps[i]);
+ }
+
+ printbuf_indent_sub(out, 2);
+ }
+
+ printbuf_indent_sub(out, 2);
+}
+
+void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
+{
+ if (k.k->type == KEY_TYPE_alloc_v4) {
+ void *src, *dst;
+
+ *out = *bkey_s_c_to_alloc_v4(k).v;
+
+ src = alloc_v4_backpointers(out);
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+ dst = alloc_v4_backpointers(out);
+
+ if (src < dst)
+ memset(src, 0, dst - src);
+
+ SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
+ } else {
+ struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
+
+ *out = (struct bch_alloc_v4) {
+ .journal_seq = u.journal_seq,
+ .flags = u.need_discard,
+ .gen = u.gen,
+ .oldest_gen = u.oldest_gen,
+ .data_type = u.data_type,
+ .stripe_redundancy = u.stripe_redundancy,
+ .dirty_sectors = u.dirty_sectors,
+ .cached_sectors = u.cached_sectors,
+ .io_time[READ] = u.read_time,
+ .io_time[WRITE] = u.write_time,
+ .stripe = u.stripe,
+ };
+
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+ }
+}
+
+static noinline struct bkey_i_alloc_v4 *
+__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
+{
+ struct bkey_i_alloc_v4 *ret;
+
+ ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
+ if (IS_ERR(ret))
+ return ret;
+
+ if (k.k->type == KEY_TYPE_alloc_v4) {
+ void *src, *dst;
+
+ bkey_reassemble(&ret->k_i, k);
+
+ src = alloc_v4_backpointers(&ret->v);
+ SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
+ dst = alloc_v4_backpointers(&ret->v);
+
+ if (src < dst)
+ memset(src, 0, dst - src);
+
+ SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
+ set_alloc_v4_u64s(ret);
+ } else {
+ bkey_alloc_v4_init(&ret->k_i);
+ ret->k.p = k.k->p;
+ bch2_alloc_to_v4(k, &ret->v);
+ }
+ return ret;
+}
+
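+/*
+ * Fast path: an alloc_v4 key with no inline backpointers can simply be
+ * copied; anything else takes the out-of-line conversion above.
+ */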
+static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
+{
+ struct bkey_s_c_alloc_v4 a;
+
+ if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
+ ((a = bkey_s_c_to_alloc_v4(k), true) &&
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
+ return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);
+
+ return __bch2_alloc_to_v4_mut(trans, k);
+}
+
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
+{
+ return bch2_alloc_to_v4_mut_inlined(trans, k);
+}
+
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
+{
+ struct bkey_s_c k;
+ struct bkey_i_alloc_v4 *a;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
+ BTREE_ITER_WITH_UPDATES|
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+
+ a = bch2_alloc_to_v4_mut_inlined(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (unlikely(ret))
+ goto err;
+ return a;
+err:
+ bch2_trans_iter_exit(trans, iter);
+ return ERR_PTR(ret);
+}
+
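+/*
+ * Each bucket_gens key covers KEY_TYPE_BUCKET_GENS_NR consecutive buckets on
+ * one device: the low KEY_TYPE_BUCKET_GENS_BITS of the bucket offset index
+ * into gens[], and the remaining high bits form the key's position.
+ */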
+static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
+{
+ *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
+
+ pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
+ return pos;
+}
+
+static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
+{
+ pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
+ pos.offset += offset;
+ return pos;
+}
+
+static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
+{
+ return k.k->type == KEY_TYPE_bucket_gens
+ ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
+ : 0;
+}
+
+int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
+ prt_printf(err, "bad val size (%zu != %zu)",
+ bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
+ if (i)
+ prt_char(out, ' ');
+ prt_printf(out, "%u", g.v->gens[i]);
+ }
+}
+
+int bch2_bucket_gens_init(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 a;
+ struct bkey_i_bucket_gens g;
+ bool have_bucket_gens_key = false;
+ unsigned offset;
+ struct bpos pos;
+ u8 gen;
+ int ret;
+
+ for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ /*
+ * Not a fsck error because this is checked/repaired by
+ * bch2_check_alloc_key() which runs later:
+ */
+ if (!bch2_dev_bucket_exists(c, k.k->p))
+ continue;
+
+ gen = bch2_alloc_to_v4(k, &a)->gen;
+ pos = alloc_gens_pos(iter.pos, &offset);
+
+ if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+ if (ret)
+ break;
+ have_bucket_gens_key = false;
+ }
+
+ if (!have_bucket_gens_key) {
+ bkey_bucket_gens_init(&g.k_i);
+ g.k.p = pos;
+ have_bucket_gens_key = true;
+ }
+
+ g.v.gens[offset] = gen;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (have_bucket_gens_key && !ret)
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+int bch2_alloc_read(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_dev *ca;
+ int ret;
+
+ down_read(&c->gc_lock);
+
+ if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
+ const struct bch_bucket_gens *g;
+ u64 b;
+
+ for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+ u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+
+ if (k.k->type != KEY_TYPE_bucket_gens)
+ continue;
+
+ g = bkey_s_c_to_bucket_gens(k).v;
+
+ /*
+ * Not a fsck error because this is checked/repaired by
+ * bch2_check_alloc_key() which runs later:
+ */
+ if (!bch2_dev_exists2(c, k.k->p.inode))
+ continue;
+
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+
+ for (b = max_t(u64, ca->mi.first_bucket, start);
+ b < min_t(u64, ca->mi.nbuckets, end);
+ b++)
+ *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ } else {
+ struct bch_alloc_v4 a;
+
+ for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ /*
+ * Not a fsck error because this is checked/repaired by
+ * bch2_check_alloc_key() which runs later:
+ */
+ if (!bch2_dev_bucket_exists(c, k.k->p))
+ continue;
+
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+
+ *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ bch2_trans_put(trans);
+ up_read(&c->gc_lock);
+
+ if (ret)
+ bch_err_fn(c, ret);
+
+ return ret;
+}
+
+/* Free space/discard btree: */
+
+static int bch2_bucket_do_index(struct btree_trans *trans,
+ struct bkey_s_c alloc_k,
+ const struct bch_alloc_v4 *a,
+ bool set)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+ struct btree_iter iter;
+ struct bkey_s_c old;
+ struct bkey_i *k;
+ enum btree_id btree;
+ enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
+ enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ if (a->data_type != BCH_DATA_free &&
+ a->data_type != BCH_DATA_need_discard)
+ return 0;
+
+ k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+
+ bkey_init(&k->k);
+ k->k.type = new_type;
+
+ switch (a->data_type) {
+ case BCH_DATA_free:
+ btree = BTREE_ID_freespace;
+ k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
+ bch2_key_resize(&k->k, 1);
+ break;
+ case BCH_DATA_need_discard:
+ btree = BTREE_ID_need_discard;
+ k->k.p = alloc_k.k->p;
+ break;
+ default:
+ return 0;
+ }
+
+ old = bch2_bkey_get_iter(trans, &iter, btree,
+ bkey_start_pos(&k->k),
+ BTREE_ITER_INTENT);
+ ret = bkey_err(old);
+ if (ret)
+ return ret;
+
+ if (ca->mi.freespace_initialized &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
+ bch2_trans_inconsistent_on(old.k->type != old_type, trans,
+ "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
+ " for %s",
+ set ? "setting" : "clearing",
+ bch2_btree_ids[btree],
+ iter.pos.inode,
+ iter.pos.offset,
+ bch2_bkey_types[old.k->type],
+ bch2_bkey_types[old_type],
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ ret = -EIO;
+ goto err;
+ }
+
+ ret = bch2_trans_update(trans, &iter, k, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
+ struct bpos bucket, u8 gen)
+{
+ struct btree_iter iter;
+ unsigned offset;
+ struct bpos pos = alloc_gens_pos(bucket, &offset);
+ struct bkey_i_bucket_gens *g;
+ struct bkey_s_c k;
+ int ret;
+
+ g = bch2_trans_kmalloc(trans, sizeof(*g));
+ ret = PTR_ERR_OR_ZERO(g);
+ if (ret)
+ return ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_WITH_UPDATES);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ if (k.k->type != KEY_TYPE_bucket_gens) {
+ bkey_bucket_gens_init(&g->k_i);
+ g->k.p = iter.pos;
+ } else {
+ bkey_reassemble(&g->k_i, k);
+ }
+
+ g->v.gens[offset] = gen;
+
+ ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_trans_mark_alloc(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_alloc_v4 old_a_convert, *new_a;
+ const struct bch_alloc_v4 *old_a;
+ u64 old_lru, new_lru;
+ int ret = 0;
+
+ /*
+ * Deletion only happens in the device removal path, with
+ * BTREE_TRIGGER_NORUN:
+ */
+ BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
+
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
+ new_a = &bkey_i_to_alloc_v4(new)->v;
+
+ new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+
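+	/*
+	 * Writing data to a bucket means it will need a discard, and a gen
+	 * bump, before it can be reused:
+	 */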
+ if (new_a->dirty_sectors > old_a->dirty_sectors ||
+ new_a->cached_sectors > old_a->cached_sectors) {
+ new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+ new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
+ SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
+ }
+
+ if (data_type_is_empty(new_a->data_type) &&
+ BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
+ !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
+ new_a->gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+ }
+
+ if (old_a->data_type != new_a->data_type ||
+ (new_a->data_type == BCH_DATA_free &&
+ alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+ ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
+ bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
+ if (ret)
+ return ret;
+ }
+
+ if (new_a->data_type == BCH_DATA_cached &&
+ !new_a->io_time[READ])
+ new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
+ old_lru = alloc_lru_idx_read(*old_a);
+ new_lru = alloc_lru_idx_read(*new_a);
+
+ if (old_lru != new_lru) {
+ ret = bch2_lru_change(trans, new->k.p.inode,
+ bucket_to_u64(new->k.p),
+ old_lru, new_lru);
+ if (ret)
+ return ret;
+ }
+
+ new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+ bch_dev_bkey_exists(c, new->k.p.inode));
+
+ if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+ ret = bch2_lru_change(trans,
+ BCH_LRU_FRAGMENTATION_START,
+ bucket_to_u64(new->k.p),
+ old_a->fragmentation_lru, new_a->fragmentation_lru);
+ if (ret)
+ return ret;
+ }
+
+ if (old_a->gen != new_a->gen) {
+ ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
+ * extents style btrees, but works on non-extents btrees:
+ */
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+{
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+
+ if (bkey_err(k))
+ return k;
+
+ if (k.k->type) {
+ return k;
+ } else {
+ struct btree_iter iter2;
+ struct bpos next;
+
+ bch2_trans_copy_iter(&iter2, iter);
+
+ if (!bpos_eq(iter->path->l[0].b->key.k.p, SPOS_MAX))
+ end = bkey_min(end, bpos_nosnap_successor(iter->path->l[0].b->key.k.p));
+
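+		/* a bkey's size field is a u32, so clamp the hole we synthesize: */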
+ end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
+
+ /*
+		 * btree node min/max is a closed interval, while peek_upto
+		 * takes a half-open interval:
+ */
+ k = bch2_btree_iter_peek_upto(&iter2, end);
+ next = iter2.pos;
+ bch2_trans_iter_exit(iter->trans, &iter2);
+
+ BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
+
+ if (bkey_err(k))
+ return k;
+
+ bkey_init(hole);
+ hole->p = iter->pos;
+
+ bch2_key_resize(hole, next.offset - iter->pos.offset);
+ return (struct bkey_s_c) { hole, NULL };
+ }
+}
+
+static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
+{
+ struct bch_dev *ca;
+ unsigned iter;
+
+ if (bch2_dev_bucket_exists(c, *bucket))
+ return true;
+
+ if (bch2_dev_exists2(c, bucket->inode)) {
+ ca = bch_dev_bkey_exists(c, bucket->inode);
+
+ if (bucket->offset < ca->mi.first_bucket) {
+ bucket->offset = ca->mi.first_bucket;
+ return true;
+ }
+
+ bucket->inode++;
+ bucket->offset = 0;
+ }
+
+ rcu_read_lock();
+ iter = bucket->inode;
+ ca = __bch2_next_dev(c, &iter, NULL);
+ if (ca)
+ *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
+ rcu_read_unlock();
+
+ return ca != NULL;
+}
+
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+{
+ struct bch_fs *c = iter->trans->c;
+ struct bkey_s_c k;
+again:
+ k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+ if (bkey_err(k))
+ return k;
+
+ if (!k.k->type) {
+ struct bpos bucket = bkey_start_pos(k.k);
+
+ if (!bch2_dev_bucket_exists(c, bucket)) {
+ if (!next_bucket(c, &bucket))
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter, bucket);
+ goto again;
+ }
+
+ if (!bch2_dev_bucket_exists(c, k.k->p)) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+
+ bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
+ }
+ }
+
+ return k;
+}
+
+static noinline_for_stack
+int bch2_check_alloc_key(struct btree_trans *trans,
+ struct bkey_s_c alloc_k,
+ struct btree_iter *alloc_iter,
+ struct btree_iter *discard_iter,
+ struct btree_iter *freespace_iter,
+ struct btree_iter *bucket_gens_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ unsigned discard_key_type, freespace_key_type;
+ unsigned gens_offset;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
+ "alloc key for invalid device:bucket %llu:%llu",
+ alloc_k.k->p.inode, alloc_k.k->p.offset))
+ return bch2_btree_delete_at(trans, alloc_iter, 0);
+
+ ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+ if (!ca->mi.freespace_initialized)
+ return 0;
+
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+ discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
+ bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
+ k = bch2_btree_iter_peek_slot(discard_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != discard_key_type &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
+ " %s",
+ bch2_bkey_types[k.k->type],
+ bch2_bkey_types[discard_key_type],
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+ struct bkey_i *update =
+ bch2_trans_kmalloc(trans, sizeof(*update));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ goto err;
+
+ bkey_init(&update->k);
+ update->k.type = discard_key_type;
+ update->k.p = discard_iter->pos;
+
+ ret = bch2_trans_update(trans, discard_iter, update, 0);
+ if (ret)
+ goto err;
+ }
+
+ freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
+ bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
+ k = bch2_btree_iter_peek_slot(freespace_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != freespace_key_type &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
+ " %s",
+ bch2_bkey_types[k.k->type],
+ bch2_bkey_types[freespace_key_type],
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+ struct bkey_i *update =
+ bch2_trans_kmalloc(trans, sizeof(*update));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ goto err;
+
+ bkey_init(&update->k);
+ update->k.type = freespace_key_type;
+ update->k.p = freespace_iter->pos;
+ bch2_key_resize(&update->k, 1);
+
+ ret = bch2_trans_update(trans, freespace_iter, update, 0);
+ if (ret)
+ goto err;
+ }
+
+ bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+ k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (a->gen != alloc_gen(k, gens_offset) &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
+ " %s",
+ alloc_gen(k, gens_offset), a->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+ struct bkey_i_bucket_gens *g =
+ bch2_trans_kmalloc(trans, sizeof(*g));
+
+ ret = PTR_ERR_OR_ZERO(g);
+ if (ret)
+ goto err;
+
+ if (k.k->type == KEY_TYPE_bucket_gens) {
+ bkey_reassemble(&g->k_i, k);
+ } else {
+ bkey_bucket_gens_init(&g->k_i);
+ g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
+ }
+
+ g->v.gens[gens_offset] = a->gen;
+
+ ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static noinline_for_stack
+int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+ struct bpos start,
+ struct bpos *end,
+ struct btree_iter *freespace_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ ca = bch_dev_bkey_exists(c, start.inode);
+ if (!ca->mi.freespace_initialized)
+ return 0;
+
+ bch2_btree_iter_set_pos(freespace_iter, start);
+
+ k = bch2_btree_iter_peek_slot(freespace_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ *end = bkey_min(k.k->p, *end);
+
+ if (k.k->type != KEY_TYPE_set &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "hole in alloc btree missing in freespace btree\n"
+ " device %llu buckets %llu-%llu",
+ freespace_iter->pos.inode,
+ freespace_iter->pos.offset,
+ end->offset))) {
+ struct bkey_i *update =
+ bch2_trans_kmalloc(trans, sizeof(*update));
+
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ goto err;
+
+ bkey_init(&update->k);
+ update->k.type = KEY_TYPE_set;
+ update->k.p = freespace_iter->pos;
+ bch2_key_resize(&update->k,
+ min_t(u64, U32_MAX, end->offset -
+ freespace_iter->pos.offset));
+
+ ret = bch2_trans_update(trans, freespace_iter, update, 0);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static noinline_for_stack
+int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
+ struct bpos start,
+ struct bpos *end,
+ struct btree_iter *bucket_gens_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ unsigned i, gens_offset, gens_end_offset;
+ int ret;
+
+ if (c->sb.version < bcachefs_metadata_version_bucket_gens)
+ return 0;
+
+ bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+
+ k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
+ alloc_gens_pos(*end, &gens_end_offset)))
+ gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
+
+ if (k.k->type == KEY_TYPE_bucket_gens) {
+ struct bkey_i_bucket_gens g;
+ bool need_update = false;
+
+ bkey_reassemble(&g.k_i, k);
+
+ for (i = gens_offset; i < gens_end_offset; i++) {
+ if (fsck_err_on(g.v.gens[i], c,
+ "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
+ bucket_gens_pos_to_alloc(k.k->p, i).inode,
+ bucket_gens_pos_to_alloc(k.k->p, i).offset,
+ g.v.gens[i])) {
+ g.v.gens[i] = 0;
+ need_update = true;
+ }
+ }
+
+ if (need_update) {
+ struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
+
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto err;
+
+ memcpy(u, &g, sizeof(g));
+
+ ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
+ if (ret)
+ goto err;
+ }
+ }
+
+ *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter alloc_iter;
+ struct bkey_s_c alloc_k;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ u64 genbits;
+ struct bpos pos;
+ enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
+ ? BCH_DATA_need_discard
+ : BCH_DATA_free;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ pos = iter->pos;
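+	/*
+	 * Freespace btree positions pack the bucket offset in the low 56 bits
+	 * and generation bits in the high 8; split them back apart:
+	 */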
+ pos.offset &= ~(~0ULL << 56);
+ genbits = iter->pos.offset & (~0ULL << 56);
+
+ alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
+ ret = bkey_err(alloc_k);
+ if (ret)
+ return ret;
+
+ if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
+			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
+ bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
+ goto delete;
+
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+ if (fsck_err_on(a->data_type != state ||
+ (state == BCH_DATA_free &&
+ genbits != alloc_freespace_genbits(*a)), c,
+ "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
+ bch2_btree_ids[iter->btree_id],
+ iter->pos.inode,
+ iter->pos.offset,
+ a->data_type == state,
+ genbits >> 56, alloc_freespace_genbits(*a) >> 56))
+ goto delete;
+out:
+fsck_err:
+ set_btree_iter_dontneed(&alloc_iter);
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ printbuf_exit(&buf);
+ return ret;
+delete:
+ ret = bch2_btree_delete_extent_at(trans, iter,
+ iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW);
+ goto out;
+}
+
+static int bch2_check_discard_freespace_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end)
+{
+ if (!btree_id_is_extents(iter->btree_id)) {
+ return __bch2_check_discard_freespace_key(trans, iter);
+ } else {
+ int ret = 0;
+
+ while (!bkey_eq(iter->pos, end) &&
+ !(ret = btree_trans_too_many_iters(trans) ?:
+ __bch2_check_discard_freespace_key(trans, iter)))
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+
+ return ret;
+ }
+}
+
+/*
+ * We've already checked that generation numbers in the bucket_gens btree are
+ * valid for buckets that exist; this just checks for keys for nonexistent
+ * buckets.
+ */
+static noinline_for_stack
+int bch2_check_bucket_gens_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i_bucket_gens g;
+ struct bch_dev *ca;
+ u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+ u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+ u64 b;
+ bool need_update = false, dev_exists;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
+ bkey_reassemble(&g.k_i, k);
+
+	/* if the device doesn't exist, skip this key whether we repair or not */
+ dev_exists = bch2_dev_exists2(c, k.k->p.inode);
+ if (!dev_exists) {
+ if (fsck_err_on(!dev_exists, c,
+ "bucket_gens key for invalid device:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter, 0);
+ }
+ goto out;
+ }
+
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ if (fsck_err_on(end <= ca->mi.first_bucket ||
+ start >= ca->mi.nbuckets, c,
+ "bucket_gens key for invalid buckets:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter, 0);
+ goto out;
+ }
+
+ for (b = start; b < ca->mi.first_bucket; b++)
+ if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+ "bucket_gens key has nonzero gen for invalid bucket")) {
+ g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+ need_update = true;
+ }
+
+ for (b = ca->mi.nbuckets; b < end; b++)
+ if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+ "bucket_gens key has nonzero gen for invalid bucket")) {
+ g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+ need_update = true;
+ }
+
+ if (need_update) {
+ struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
+
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto out;
+
+ memcpy(u, &g, sizeof(g));
+ ret = bch2_trans_update(trans, iter, u, 0);
+ }
+out:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_check_alloc_info(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
+ struct bkey hole;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
+ BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
+ BTREE_ITER_PREFETCH);
+ bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
+ BTREE_ITER_PREFETCH);
+
+ while (1) {
+ struct bpos next;
+
+ bch2_trans_begin(trans);
+
+ k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
+ ret = bkey_err(k);
+ if (ret)
+ goto bkey_err;
+
+ if (!k.k)
+ break;
+
+ if (k.k->type) {
+ next = bpos_nosnap_successor(k.k->p);
+
+ ret = bch2_check_alloc_key(trans,
+ k, &iter,
+ &discard_iter,
+ &freespace_iter,
+ &bucket_gens_iter);
+ if (ret)
+ goto bkey_err;
+ } else {
+ next = k.k->p;
+
+ ret = bch2_check_alloc_hole_freespace(trans,
+ bkey_start_pos(k.k),
+ &next,
+ &freespace_iter) ?:
+ bch2_check_alloc_hole_bucket_gens(trans,
+ bkey_start_pos(k.k),
+ &next,
+ &bucket_gens_iter);
+ if (ret)
+ goto bkey_err;
+ }
+
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_pos(&iter, next);
+bkey_err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &bucket_gens_iter);
+ bch2_trans_iter_exit(trans, &freespace_iter);
+ bch2_trans_iter_exit(trans, &discard_iter);
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret < 0)
+ goto err;
+
+ ret = for_each_btree_key2(trans, iter,
+ BTREE_ID_need_discard, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ bch2_check_discard_freespace_key(trans, &iter, k.k->p)) ?:
+ for_each_btree_key2(trans, iter,
+ BTREE_ID_freespace, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ bch2_check_discard_freespace_key(trans, &iter, k.k->p)) ?:
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_bucket_gens, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_bucket_gens_key(trans, &iter, k));
+err:
+ bch2_trans_put(trans);
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
+ struct btree_iter *alloc_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter lru_iter;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ struct bkey_s_c alloc_k, lru_k;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ alloc_k = bch2_btree_iter_peek(alloc_iter);
+ if (!alloc_k.k)
+ return 0;
+
+ ret = bkey_err(alloc_k);
+ if (ret)
+ return ret;
+
+ a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+ if (a->data_type != BCH_DATA_cached)
+ return 0;
+
+ lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
+ lru_pos(alloc_k.k->p.inode,
+ bucket_to_u64(alloc_k.k->p),
+ a->io_time[READ]), 0);
+ ret = bkey_err(lru_k);
+ if (ret)
+ return ret;
+
+ if (fsck_err_on(!a->io_time[READ], c,
+ "cached bucket with read_time 0\n"
+ " %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
+ fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
+ "missing lru entry\n"
+ " %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ u64 read_time = a->io_time[READ] ?:
+ atomic64_read(&c->io_clock[READ].now);
+
+ ret = bch2_lru_set(trans,
+ alloc_k.k->p.inode,
+ bucket_to_u64(alloc_k.k->p),
+ read_time);
+ if (ret)
+ goto err;
+
+ if (a->io_time[READ] != read_time) {
+ struct bkey_i_alloc_v4 *a_mut =
+ bch2_alloc_to_v4_mut(trans, alloc_k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ goto err;
+
+ a_mut->v.io_time[READ] = read_time;
+ ret = bch2_trans_update(trans, alloc_iter,
+ &a_mut->k_i, BTREE_TRIGGER_NORUN);
+ if (ret)
+ goto err;
+ }
+ }
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &lru_iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+ POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_alloc_to_lru_ref(trans, &iter)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int bch2_discard_one_bucket(struct btree_trans *trans,
+ struct btree_iter *need_discard_iter,
+ struct bpos *discard_pos_done,
+ u64 *seen,
+ u64 *open,
+ u64 *need_journal_commit,
+ u64 *discarded)
+{
+ struct bch_fs *c = trans->c;
+ struct bpos pos = need_discard_iter->pos;
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c k;
+ struct bch_dev *ca;
+ struct bkey_i_alloc_v4 *a;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ ca = bch_dev_bkey_exists(c, pos.inode);
+ if (!percpu_ref_tryget(&ca->io_ref)) {
+ bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
+ return 0;
+ }
+
+ if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
+ (*open)++;
+ goto out;
+ }
+
+ if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ c->journal.flushed_seq_ondisk,
+ pos.inode, pos.offset)) {
+ (*need_journal_commit)++;
+ goto out;
+ }
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+ need_discard_iter->pos,
+ BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret)
+ goto out;
+
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ goto out;
+
+ if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
+ a->v.gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ goto write;
+ }
+
+ if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+ if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+ bch2_trans_inconsistent(trans,
+ "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
+ "%s",
+ a->v.journal_seq,
+ c->journal.flushed_seq_ondisk,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ }
+ goto out;
+ }
+
+ if (a->v.data_type != BCH_DATA_need_discard) {
+ if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+ bch2_trans_inconsistent(trans,
+ "bucket incorrectly set in need_discard btree\n"
+ "%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ }
+
+ goto out;
+ }
+
+ if (!bkey_eq(*discard_pos_done, iter.pos) &&
+ ca->mi.discard && !c->opts.nochanges) {
+ /*
+ * This works without any other locks because this is the only
+ * thread that removes items from the need_discard tree
+ */
+ bch2_trans_unlock(trans);
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ k.k->p.offset * ca->mi.bucket_size,
+ ca->mi.bucket_size,
+ GFP_KERNEL);
+ *discard_pos_done = iter.pos;
+
+ ret = bch2_trans_relock_notrace(trans);
+ if (ret)
+ goto out;
+ }
+
+ SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
+ a->v.data_type = alloc_data_type(a->v, a->v.data_type);
+write:
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BCH_WATERMARK_btree|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto out;
+
+ this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
+ (*discarded)++;
+out:
+ (*seen)++;
+ bch2_trans_iter_exit(trans, &iter);
+ percpu_ref_put(&ca->io_ref);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static void bch2_do_discards_work(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
+ struct bpos discard_pos_done = POS_MAX;
+ int ret;
+
+ /*
+ * We're doing the commit in bch2_discard_one_bucket instead of using
+ * for_each_btree_key_commit() so that we can increment counters after
+ * successful commit:
+ */
+ ret = bch2_trans_run(c,
+ for_each_btree_key2(trans, iter,
+ BTREE_ID_need_discard, POS_MIN, 0, k,
+ bch2_discard_one_bucket(trans, &iter, &discard_pos_done,
+ &seen,
+ &open,
+ &need_journal_commit,
+ &discarded)));
+
+ if (need_journal_commit * 2 > seen)
+ bch2_journal_flush_async(&c->journal, NULL);
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+
+ trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
+ bch2_err_str(ret));
+}
+
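+/*
+ * Kick off an async discard pass: take a write ref for the work item, and
+ * drop it again if the work was already queued.
+ */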
+void bch2_do_discards(struct bch_fs *c)
+{
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
+ !queue_work(c->write_ref_wq, &c->discard_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+}
+
+static int invalidate_one_bucket(struct btree_trans *trans,
+ struct btree_iter *lru_iter,
+ struct bkey_s_c lru_k,
+ s64 *nr_to_invalidate)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter alloc_iter = { NULL };
+ struct bkey_i_alloc_v4 *a = NULL;
+ struct printbuf buf = PRINTBUF;
+ struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
+ unsigned cached_sectors;
+ int ret = 0;
+
+ if (*nr_to_invalidate <= 0)
+ return 1;
+
+ if (!bch2_dev_bucket_exists(c, bucket)) {
+ prt_str(&buf, "lru entry points to invalid bucket");
+ goto err;
+ }
+
+ if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
+ return 0;
+
+ a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ goto out;
+
+ /* We expect harmless races here due to the btree write buffer: */
+ if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
+ goto out;
+
+ BUG_ON(a->v.data_type != BCH_DATA_cached);
+
+ if (!a->v.cached_sectors)
+ bch_err(c, "invalidating empty bucket, confused");
+
+ cached_sectors = a->v.cached_sectors;
+
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ a->v.gen++;
+ a->v.data_type = 0;
+ a->v.dirty_sectors = 0;
+ a->v.cached_sectors = 0;
+ a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+ a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
+
+ ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
+ BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BCH_WATERMARK_btree|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto out;
+
+ trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
+ --*nr_to_invalidate;
+out:
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ printbuf_exit(&buf);
+ return ret;
+err:
+ prt_str(&buf, "\n lru key: ");
+ bch2_bkey_val_to_text(&buf, c, lru_k);
+
+ prt_str(&buf, "\n lru entry: ");
+ bch2_lru_pos_to_text(&buf, lru_iter->pos);
+
+ prt_str(&buf, "\n alloc key: ");
+ if (!a)
+ bch2_bpos_to_text(&buf, bucket);
+ else
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+
+ bch_err(c, "%s", buf.buf);
+ if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
+ bch2_inconsistent_error(c);
+ ret = -EINVAL;
+ }
+
+ goto out;
+}
+
+static void bch2_do_invalidates_work(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
+ struct bch_dev *ca;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ unsigned i;
+ int ret = 0;
+
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (ret)
+ goto err;
+
+ for_each_member_device(ca, c, i) {
+ s64 nr_to_invalidate =
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+ lru_pos(ca->dev_idx, 0, 0),
+ lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
+ BTREE_ITER_INTENT, k,
+ invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
+
+ if (ret < 0) {
+ percpu_ref_put(&ca->ref);
+ break;
+ }
+ }
+err:
+ bch2_trans_put(trans);
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+}
+
+void bch2_do_invalidates(struct bch_fs *c)
+{
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
+ !queue_work(c->write_ref_wq, &c->invalidate_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+}
+
+int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
+ u64 bucket_start, u64 bucket_end)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey hole;
+ struct bpos end = POS(ca->dev_idx, bucket_end);
+ struct bch_member *m;
+ unsigned long last_updated = jiffies;
+ int ret;
+
+ BUG_ON(bucket_start > bucket_end);
+ BUG_ON(bucket_end > ca->mi.nbuckets);
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
+ BTREE_ITER_PREFETCH);
+ /*
+ * Scan the alloc btree for every bucket on @ca, and add buckets to the
+ * freespace/need_discard/need_gc_gens btrees as needed:
+ */
+ while (1) {
+		if (time_after(jiffies, last_updated + HZ * 10)) {
+ bch_info(ca, "%s: currently at %llu/%llu",
+ __func__, iter.pos.offset, ca->mi.nbuckets);
+ last_updated = jiffies;
+ }
+
+ bch2_trans_begin(trans);
+
+ if (bkey_ge(iter.pos, end)) {
+ ret = 0;
+ break;
+ }
+
+ k = bch2_get_key_or_hole(&iter, end, &hole);
+ ret = bkey_err(k);
+ if (ret)
+ goto bkey_err;
+
+ if (k.k->type) {
+ /*
+ * We process live keys in the alloc btree one at a
+ * time:
+ */
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+ ret = bch2_bucket_do_index(trans, k, a, true) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_advance(&iter);
+ } else {
+ struct bkey_i *freespace;
+
+ freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
+ ret = PTR_ERR_OR_ZERO(freespace);
+ if (ret)
+ goto bkey_err;
+
+ bkey_init(&freespace->k);
+ freespace->k.type = KEY_TYPE_set;
+ freespace->k.p = k.k->p;
+ freespace->k.size = k.k->size;
+
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_pos(&iter, k.k->p);
+ }
+bkey_err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+
+ if (ret < 0) {
+ bch_err_msg(ca, ret, "initializing free space");
+ return ret;
+ }
+
+ mutex_lock(&c->sb_lock);
+ m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
+ mutex_unlock(&c->sb_lock);
+
+ return 0;
+}
+
+int bch2_fs_freespace_init(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret = 0;
+ bool doing_init = false;
+
+ /*
+ * We can crash during the device add path, so we need to check this on
+ * every mount:
+ */
+
+ for_each_member_device(ca, c, i) {
+ if (ca->mi.freespace_initialized)
+ continue;
+
+ if (!doing_init) {
+ bch_info(c, "initializing freespace");
+ doing_init = true;
+ }
+
+ ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
+ if (ret) {
+ percpu_ref_put(&ca->ref);
+ bch_err_fn(c, ret);
+ return ret;
+ }
+ }
+
+ if (doing_init) {
+ mutex_lock(&c->sb_lock);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ bch_verbose(c, "done initializing freespace");
+ }
+
+ return 0;
+}
+
+/* Bucket IO clocks: */
+
+int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
+ size_t bucket_nr, int rw)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a;
+ u64 now;
+ int ret = 0;
+
+ a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ret;
+
+ now = atomic64_read(&c->io_clock[rw].now);
+ if (a->v.io_time[rw] == now)
+ goto out;
+
+ a->v.io_time[rw] = now;
+
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/* Startup/shutdown (ro/rw): */
+
+void bch2_recalc_capacity(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ u64 capacity = 0, reserved_sectors = 0, gc_reserve;
+ unsigned bucket_size_max = 0;
+ unsigned long ra_pages = 0;
+ unsigned i;
+
+ lockdep_assert_held(&c->state_lock);
+
+ for_each_online_member(ca, c, i) {
+ struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
+
+ ra_pages += bdi->ra_pages;
+ }
+
+ bch2_set_ra_pages(c, ra_pages);
+
+ for_each_rw_member(ca, c, i) {
+ u64 dev_reserve = 0;
+
+ /*
+ * We need to reserve buckets (from the number
+ * of currently available buckets) against
+ * foreground writes so that mainly copygc can
+ * make forward progress.
+ *
+ * We need enough to refill the various reserves
+ * from scratch - copygc will use its entire
+		 * reserve all at once, then run again when
+ * its reserve is refilled (from the formerly
+ * available buckets).
+ *
+ * This reserve is just used when considering if
+ * allocations for foreground writes must wait -
+		 * not for -ENOSPC calculations.
+ */
+
+ dev_reserve += ca->nr_btree_reserve * 2;
+ dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
+
+ dev_reserve += 1; /* btree write point */
+ dev_reserve += 1; /* copygc write point */
+ dev_reserve += 1; /* rebalance write point */
+
+ dev_reserve *= ca->mi.bucket_size;
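+
+		/*
+		 * Illustrative numbers only: with nr_btree_reserve = 8 and a
+		 * device of 1 << 20 buckets of 1024 sectors each, this works
+		 * out to (16 + 16384 + 3) * 1024 sectors - roughly 1.6% of
+		 * the device.
+		 */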
+
+ capacity += bucket_to_sector(ca, ca->mi.nbuckets -
+ ca->mi.first_bucket);
+
+ reserved_sectors += dev_reserve * 2;
+
+ bucket_size_max = max_t(unsigned, bucket_size_max,
+ ca->mi.bucket_size);
+ }
+
+ gc_reserve = c->opts.gc_reserve_bytes
+ ? c->opts.gc_reserve_bytes >> 9
+ : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
+
+ reserved_sectors = max(gc_reserve, reserved_sectors);
+
+ reserved_sectors = min(reserved_sectors, capacity);
+
+ c->capacity = capacity - reserved_sectors;
+
+ c->bucket_size_max = bucket_size_max;
+
+	/* Wake up in case someone was waiting for buckets */
+ closure_wake_up(&c->freelist_wait);
+}
+
+static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct open_bucket *ob;
+ bool ret = false;
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ spin_lock(&ob->lock);
+ if (ob->valid && !ob->on_partial_list &&
+ ob->dev == ca->dev_idx)
+ ret = true;
+ spin_unlock(&ob->lock);
+ }
+
+ return ret;
+}
+
+/* device goes ro: */
+void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
+{
+ unsigned i;
+
+ /* First, remove device from allocation groups: */
+
+ for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+ clear_bit(ca->dev_idx, c->rw_devs[i].d);
+
+ /*
+ * Capacity is calculated based off of devices in allocation groups:
+ */
+ bch2_recalc_capacity(c);
+
+ bch2_open_buckets_stop(c, ca, false);
+
+ /*
+ * Wake up threads that were blocked on allocation, so they can notice
+	 * the device can no longer be allocated from and the capacity has
+	 * changed:
+ */
+ closure_wake_up(&c->freelist_wait);
+
+ /*
+ * journal_res_get() can block waiting for free space in the journal -
+ * it needs to notice there may not be devices to allocate from anymore:
+ */
+ wake_up(&c->journal.wait);
+
+ /* Now wait for any in flight writes: */
+
+ closure_wait_event(&c->open_buckets_wait,
+ !bch2_dev_has_open_write_point(c, ca));
+}
+
+/* device goes rw: */
+void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+ if (ca->mi.data_allowed & (1 << i))
+ set_bit(ca->dev_idx, c->rw_devs[i].d);
+}
+
+void bch2_fs_allocator_background_init(struct bch_fs *c)
+{
+ spin_lock_init(&c->freelist_lock);
+ INIT_WORK(&c->discard_work, bch2_do_discards_work);
+ INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
+}
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
new file mode 100644
index 000000000000..97042067d2a9
--- /dev/null
+++ b/fs/bcachefs/alloc_background.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
+#define _BCACHEFS_ALLOC_BACKGROUND_H
+
+#include "bcachefs.h"
+#include "alloc_types.h"
+#include "buckets.h"
+#include "debug.h"
+#include "super.h"
+
+enum bkey_invalid_flags;
+
+/* How out of date a pointer gen is allowed to be: */
+#define BUCKET_GC_GEN_MAX 96U
+
+static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
+{
+ struct bch_dev *ca;
+
+ if (!bch2_dev_exists2(c, pos.inode))
+ return false;
+
+ ca = bch_dev_bkey_exists(c, pos.inode);
+ return pos.offset >= ca->mi.first_bucket &&
+ pos.offset < ca->mi.nbuckets;
+}
+
+static inline u64 bucket_to_u64(struct bpos bucket)
+{
+ return (bucket.inode << 48) | bucket.offset;
+}
+
+static inline struct bpos u64_to_bucket(u64 bucket)
+{
+ return POS(bucket >> 48, bucket & ~(~0ULL << 48));
+}
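+
+/*
+ * i.e. the device index lives in the top 16 bits and the bucket offset in the
+ * low 48: bucket_to_u64(POS(2, 100)) is (2ULL << 48) | 100, and
+ * u64_to_bucket() inverts it exactly.
+ */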
+
+static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
+{
+ return a.gen - a.oldest_gen;
+}
+
+static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
+ u32 cached_sectors,
+ u32 stripe,
+ struct bch_alloc_v4 a,
+ enum bch_data_type data_type)
+{
+ if (stripe)
+ return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
+ if (dirty_sectors)
+ return data_type;
+ if (cached_sectors)
+ return BCH_DATA_cached;
+ if (BCH_ALLOC_V4_NEED_DISCARD(&a))
+ return BCH_DATA_need_discard;
+ if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
+ return BCH_DATA_need_gc_gens;
+ return BCH_DATA_free;
+}
+
+static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
+ enum bch_data_type data_type)
+{
+ return __alloc_data_type(a.dirty_sectors, a.cached_sectors,
+ a.stripe, a, data_type);
+}
+
+static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
+{
+ return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
+}
+
+static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
+{
+ return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
+}
+
+#define DATA_TYPES_MOVABLE \
+ ((1U << BCH_DATA_btree)| \
+ (1U << BCH_DATA_user)| \
+ (1U << BCH_DATA_stripe))
+
+static inline bool data_type_movable(enum bch_data_type type)
+{
+ return (1U << type) & DATA_TYPES_MOVABLE;
+}
+
+static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
+ struct bch_dev *ca)
+{
+ if (!data_type_movable(a.data_type) ||
+ a.dirty_sectors >= ca->mi.bucket_size)
+ return 0;
+
+ return div_u64((u64) a.dirty_sectors * (1ULL << 31), ca->mi.bucket_size);
+}
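+
+/*
+ * i.e. the dirty fraction of the bucket scaled into [0, 1 << 31): a bucket a
+ * quarter full of movable data maps to 1 << 29, so emptier (cheaper to
+ * evacuate) buckets get smaller fragmentation LRU keys.
+ */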
+
+static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
+{
+ return ((u64) alloc_gc_gen(a) >> 4) << 56;
+}
+
+static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
+{
+ pos.offset |= alloc_freespace_genbits(a);
+ return pos;
+}
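+
+/*
+ * e.g. a bucket at offset 100 whose alloc_gc_gen() is 32 gets a freespace key
+ * with offset (2ULL << 56) | 100 - the top byte carries the high four bits of
+ * the gc gen, which the foreground allocator checks against the alloc key
+ * when it takes a bucket off the freespace btree.
+ */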
+
+static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
+{
+ unsigned ret = (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
+ BCH_ALLOC_V4_U64s_V0) +
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
+ (sizeof(struct bch_backpointer) / sizeof(u64));
+
+ BUG_ON(ret > U8_MAX - BKEY_U64s);
+ return ret;
+}
+
+static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
+{
+ set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v));
+}
+
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
+
+void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+
+static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
+{
+ const struct bch_alloc_v4 *ret;
+
+ if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
+ goto slowpath;
+
+ ret = bkey_s_c_to_alloc_v4(k).v;
+ if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
+ goto slowpath;
+
+ return ret;
+slowpath:
+ __bch2_alloc_to_v4(k, convert);
+ return convert;
+}
+
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
+
+int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
+
+int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_alloc_v4_swab(struct bkey_s);
+void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_alloc ((struct bkey_ops) { \
+ .key_invalid = bch2_alloc_v1_invalid, \
+ .val_to_text = bch2_alloc_to_text, \
+ .trans_trigger = bch2_trans_mark_alloc, \
+ .atomic_trigger = bch2_mark_alloc, \
+ .min_val_size = 8, \
+})
+
+#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \
+ .key_invalid = bch2_alloc_v2_invalid, \
+ .val_to_text = bch2_alloc_to_text, \
+ .trans_trigger = bch2_trans_mark_alloc, \
+ .atomic_trigger = bch2_mark_alloc, \
+ .min_val_size = 8, \
+})
+
+#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \
+ .key_invalid = bch2_alloc_v3_invalid, \
+ .val_to_text = bch2_alloc_to_text, \
+ .trans_trigger = bch2_trans_mark_alloc, \
+ .atomic_trigger = bch2_mark_alloc, \
+ .min_val_size = 16, \
+})
+
+#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \
+ .key_invalid = bch2_alloc_v4_invalid, \
+ .val_to_text = bch2_alloc_to_text, \
+ .swab = bch2_alloc_v4_swab, \
+ .trans_trigger = bch2_trans_mark_alloc, \
+ .atomic_trigger = bch2_mark_alloc, \
+ .min_val_size = 48, \
+})
+
+int bch2_bucket_gens_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \
+ .key_invalid = bch2_bucket_gens_invalid, \
+ .val_to_text = bch2_bucket_gens_to_text, \
+})
+
+int bch2_bucket_gens_init(struct bch_fs *);
+
+static inline bool bkey_is_alloc(const struct bkey *k)
+{
+ return k->type == KEY_TYPE_alloc ||
+ k->type == KEY_TYPE_alloc_v2 ||
+ k->type == KEY_TYPE_alloc_v3;
+}
+
+int bch2_alloc_read(struct bch_fs *);
+
+int bch2_trans_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_check_alloc_info(struct bch_fs *);
+int bch2_check_alloc_to_lru_refs(struct bch_fs *);
+void bch2_do_discards(struct bch_fs *);
+
+static inline u64 should_invalidate_buckets(struct bch_dev *ca,
+ struct bch_dev_usage u)
+{
+ u64 want_free = ca->mi.nbuckets >> 7;
+ u64 free = max_t(s64, 0,
+ u.d[BCH_DATA_free].buckets
+ + u.d[BCH_DATA_need_discard].buckets
+ - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
+
+ return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
+}
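+
+/*
+ * e.g. (hypothetical numbers, ignoring the watermark reserve): on a device of
+ * 1 << 20 buckets we want 8192 free; if 5000 are free or awaiting discard and
+ * 10000 hold cached data, this asks for 3192 cached buckets to be
+ * invalidated.
+ */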
+
+void bch2_do_invalidates(struct bch_fs *);
+
+static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
+{
+ return (void *) ((u64 *) &a->v +
+ (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
+ BCH_ALLOC_V4_U64s_V0));
+}
+
+static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a)
+{
+ return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a));
+}
+
+int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
+int bch2_fs_freespace_init(struct bch_fs *);
+
+void bch2_recalc_capacity(struct bch_fs *);
+
+void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
+void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
+
+void bch2_fs_allocator_background_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
new file mode 100644
index 000000000000..3bc4abd3d7d5
--- /dev/null
+++ b/fs/bcachefs/alloc_foreground.c
@@ -0,0 +1,1576 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2012 Google, Inc.
+ *
+ * Foreground allocator code: allocate buckets from freelist, and allocate at
+ * sector granularity from writepoints.
+ *
+ * bch2_bucket_alloc() allocates a single bucket from a specific device.
+ *
+ * bch2_bucket_alloc_set_trans() allocates one or more buckets from different
+ * devices in a given filesystem.
+ */
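+
+/*
+ * A rough usage sketch (hypothetical caller, error handling elided):
+ *
+ *	ob = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
+ *	if (!IS_ERR(ob)) {
+ *		... write to the bucket, insert pointers into the btree ...
+ *		bch2_open_bucket_put(c, ob);
+ *	}
+ */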
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "backpointers.h"
+#include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_gc.h"
+#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
+#include "clock.h"
+#include "debug.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "error.h"
+#include "io_write.h"
+#include "journal.h"
+#include "movinggc.h"
+#include "nocow_locking.h"
+#include "trace.h"
+
+#include <linux/math64.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
+ struct mutex *lock)
+{
+ if (!mutex_trylock(lock)) {
+ bch2_trans_unlock(trans);
+ mutex_lock(lock);
+ }
+}
+
+const char * const bch2_watermarks[] = {
+#define x(t) #t,
+ BCH_WATERMARKS()
+#undef x
+ NULL
+};
+
+/*
+ * Open buckets represent buckets that are currently being allocated from. They
+ * serve two purposes:
+ *
+ * - They track buckets that have been partially allocated, allowing for
+ * sub-bucket sized allocations - they're used by the sector allocator below
+ *
+ * - They provide a reference to the buckets they own that mark and sweep GC
+ * can find, until the new allocation has a pointer to it inserted into the
+ * btree
+ *
+ * When allocating some space with the sector allocator, the allocation comes
+ * with a reference to an open bucket - the caller is required to put that
+ * reference _after_ doing the index update that makes its allocation reachable.
+ */
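+
+/*
+ * Illustrative write path (hypothetical caller; details elided): start an
+ * allocation with bch2_alloc_sectors_start_trans(), append the allocated
+ * space to the extent with bch2_alloc_sectors_append_ptrs(), take references
+ * via bch2_open_bucket_get(), release the write point with
+ * bch2_alloc_sectors_done(), then drop the open_bucket references with
+ * bch2_open_buckets_put() only once the index update is done.
+ */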
+
+void bch2_reset_alloc_cursors(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL)
+ ca->alloc_cursor = 0;
+ rcu_read_unlock();
+}
+
+static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
+{
+ open_bucket_idx_t idx = ob - c->open_buckets;
+ open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+ ob->hash = *slot;
+ *slot = idx;
+}
+
+static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
+{
+ open_bucket_idx_t idx = ob - c->open_buckets;
+ open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+ while (*slot != idx) {
+ BUG_ON(!*slot);
+ slot = &c->open_buckets[*slot].hash;
+ }
+
+ *slot = ob->hash;
+ ob->hash = 0;
+}
+
+void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+ if (ob->ec) {
+ ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
+ return;
+ }
+
+ percpu_down_read(&c->mark_lock);
+ spin_lock(&ob->lock);
+
+ ob->valid = false;
+ ob->data_type = 0;
+
+ spin_unlock(&ob->lock);
+ percpu_up_read(&c->mark_lock);
+
+ spin_lock(&c->freelist_lock);
+ bch2_open_bucket_hash_remove(c, ob);
+
+ ob->freelist = c->open_buckets_freelist;
+ c->open_buckets_freelist = ob - c->open_buckets;
+
+ c->open_buckets_nr_free++;
+ ca->nr_open_buckets--;
+ spin_unlock(&c->freelist_lock);
+
+ closure_wake_up(&c->open_buckets_wait);
+}
+
+void bch2_open_bucket_write_error(struct bch_fs *c,
+ struct open_buckets *obs,
+ unsigned dev)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, obs, ob, i)
+ if (ob->dev == dev && ob->ec)
+ bch2_ec_bucket_cancel(c, ob);
+}
+
+static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
+{
+ struct open_bucket *ob;
+
+ BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
+
+ ob = c->open_buckets + c->open_buckets_freelist;
+ c->open_buckets_freelist = ob->freelist;
+ atomic_set(&ob->pin, 1);
+ ob->data_type = 0;
+
+ c->open_buckets_nr_free--;
+ return ob;
+}
+
+static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
+{
+ BUG_ON(c->open_buckets_partial_nr >=
+ ARRAY_SIZE(c->open_buckets_partial));
+
+ spin_lock(&c->freelist_lock);
+ ob->on_partial_list = true;
+ c->open_buckets_partial[c->open_buckets_partial_nr++] =
+ ob - c->open_buckets;
+ spin_unlock(&c->freelist_lock);
+
+ closure_wake_up(&c->open_buckets_wait);
+ closure_wake_up(&c->freelist_wait);
+}
+
+/* _only_ for allocating the journal on a new device: */
+long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
+{
+ while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
+ u64 b = ca->new_fs_bucket_idx++;
+
+ if (!is_superblock_bucket(ca, b) &&
+ (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
+ return b;
+ }
+
+ return -1;
+}
+
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
+{
+ switch (watermark) {
+ case BCH_WATERMARK_reclaim:
+ return 0;
+ case BCH_WATERMARK_btree:
+ case BCH_WATERMARK_btree_copygc:
+ return OPEN_BUCKETS_COUNT / 4;
+ case BCH_WATERMARK_copygc:
+ return OPEN_BUCKETS_COUNT / 3;
+ default:
+ return OPEN_BUCKETS_COUNT / 2;
+ }
+}
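+
+/*
+ * With OPEN_BUCKETS_COUNT = 1024, this reserves 0/256/341/512 open buckets
+ * for reclaim/btree/copygc/normal allocations respectively: normal writes
+ * stall first, leaving open buckets for the higher-priority users above.
+ */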
+
+static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+ u64 bucket,
+ enum bch_watermark watermark,
+ const struct bch_alloc_v4 *a,
+ struct bucket_alloc_state *s,
+ struct closure *cl)
+{
+ struct open_bucket *ob;
+
+ if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
+ s->skipped_nouse++;
+ return NULL;
+ }
+
+ if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+ s->skipped_open++;
+ return NULL;
+ }
+
+ if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
+ s->skipped_need_journal_commit++;
+ return NULL;
+ }
+
+ if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+ s->skipped_nocow++;
+ return NULL;
+ }
+
+ spin_lock(&c->freelist_lock);
+
+ if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
+ if (cl)
+ closure_wait(&c->open_buckets_wait, cl);
+
+ if (!c->blocked_allocate_open_bucket)
+ c->blocked_allocate_open_bucket = local_clock();
+
+ spin_unlock(&c->freelist_lock);
+ return ERR_PTR(-BCH_ERR_open_buckets_empty);
+ }
+
+ /* Recheck under lock: */
+ if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+ spin_unlock(&c->freelist_lock);
+ s->skipped_open++;
+ return NULL;
+ }
+
+ ob = bch2_open_bucket_alloc(c);
+
+ spin_lock(&ob->lock);
+
+ ob->valid = true;
+ ob->sectors_free = ca->mi.bucket_size;
+ ob->dev = ca->dev_idx;
+ ob->gen = a->gen;
+ ob->bucket = bucket;
+ spin_unlock(&ob->lock);
+
+ ca->nr_open_buckets++;
+ bch2_open_bucket_hash_add(c, ob);
+
+ if (c->blocked_allocate_open_bucket) {
+ bch2_time_stats_update(
+ &c->times[BCH_TIME_blocked_allocate_open_bucket],
+ c->blocked_allocate_open_bucket);
+ c->blocked_allocate_open_bucket = 0;
+ }
+
+ if (c->blocked_allocate) {
+ bch2_time_stats_update(
+ &c->times[BCH_TIME_blocked_allocate],
+ c->blocked_allocate);
+ c->blocked_allocate = 0;
+ }
+
+ spin_unlock(&c->freelist_lock);
+ return ob;
+}
+
+static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
+ enum bch_watermark watermark, u64 free_entry,
+ struct bucket_alloc_state *s,
+ struct bkey_s_c freespace_k,
+ struct closure *cl)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c k;
+ struct open_bucket *ob;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ u64 b = free_entry & ~(~0ULL << 56);
+ unsigned genbits = free_entry >> 56;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
+ prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
+ " freespace key ",
+ ca->mi.first_bucket, ca->mi.nbuckets);
+ bch2_bkey_val_to_text(&buf, c, freespace_k);
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+ ob = ERR_PTR(-EIO);
+ goto err;
+ }
+
+ k = bch2_bkey_get_iter(trans, &iter,
+ BTREE_ID_alloc, POS(ca->dev_idx, b),
+ BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ goto err;
+ }
+
+ a = bch2_alloc_to_v4(k, &a_convert);
+
+ if (a->data_type != BCH_DATA_free) {
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+ ob = NULL;
+ goto err;
+ }
+
+ prt_printf(&buf, "non free bucket in freespace btree\n"
+ " freespace key ");
+ bch2_bkey_val_to_text(&buf, c, freespace_k);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+ ob = ERR_PTR(-EIO);
+ goto err;
+ }
+
+ if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+ prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
+ " freespace key ",
+ genbits, alloc_freespace_genbits(*a) >> 56);
+ bch2_bkey_val_to_text(&buf, c, freespace_k);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+ ob = ERR_PTR(-EIO);
+ goto err;
+ }
+
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
+ struct bch_backpointer bp;
+ struct bpos bp_pos = POS_MIN;
+
+ ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+ &bp_pos, &bp,
+ BTREE_ITER_NOPRESERVE);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ goto err;
+ }
+
+ if (!bkey_eq(bp_pos, POS_MAX)) {
+ /*
+ * Bucket may have data in it - we don't call
+			 * bch2_trans_inconsistent() because fsck hasn't
+ * finished yet
+ */
+ ob = NULL;
+ goto err;
+ }
+ }
+
+ ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
+ if (!ob)
+ iter.path->preserve = false;
+err:
+ if (iter.trans && iter.path)
+ set_btree_iter_dontneed(&iter);
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ob;
+}
+
+/*
+ * This path is used before the freespace btree is initialized:
+ *
+ * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
+ * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
+ */
+static noinline struct open_bucket *
+bch2_bucket_alloc_early(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum bch_watermark watermark,
+ struct bucket_alloc_state *s,
+ struct closure *cl)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob = NULL;
+ u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+ u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
+ int ret;
+again:
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
+ BTREE_ITER_SLOTS, k, ret) {
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+
+ if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
+ break;
+
+ if (ca->new_fs_bucket_idx &&
+ is_superblock_bucket(ca, k.k->p.offset))
+ continue;
+
+ a = bch2_alloc_to_v4(k, &a_convert);
+
+ if (a->data_type != BCH_DATA_free)
+ continue;
+
+ s->buckets_seen++;
+
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+ if (ob)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ ca->alloc_cursor = alloc_cursor;
+
+ if (!ob && ret)
+ ob = ERR_PTR(ret);
+
+ if (!ob && alloc_cursor > alloc_start) {
+ alloc_cursor = alloc_start;
+ goto again;
+ }
+
+ return ob;
+}
+
+static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum bch_watermark watermark,
+ struct bucket_alloc_state *s,
+ struct closure *cl)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob = NULL;
+ u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+ u64 alloc_cursor = alloc_start;
+ int ret;
+
+ BUG_ON(ca->new_fs_bucket_idx);
+again:
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
+ POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
+ if (k.k->p.inode != ca->dev_idx)
+ break;
+
+ for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
+ alloc_cursor < k.k->p.offset;
+ alloc_cursor++) {
+ ret = btree_trans_too_many_iters(trans);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ break;
+ }
+
+ s->buckets_seen++;
+
+ ob = try_alloc_bucket(trans, ca, watermark,
+ alloc_cursor, s, k, cl);
+ if (ob) {
+ iter.path->preserve = false;
+ break;
+ }
+ }
+
+ if (ob || ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ ca->alloc_cursor = alloc_cursor;
+
+ if (!ob && ret)
+ ob = ERR_PTR(ret);
+
+ if (!ob && alloc_start > ca->mi.first_bucket) {
+ alloc_cursor = alloc_start = ca->mi.first_bucket;
+ goto again;
+ }
+
+ return ob;
+}
+
+/**
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ * @trans: transaction object
+ * @ca: device to allocate from
+ * @watermark: how important is this allocation?
+ * @cl: if not NULL, closure to be used to wait if buckets not available
+ * @usage: for returning the current device usage as well
+ *
+ * Returns: an open_bucket on success, or an ERR_PTR() on failure.
+ */
+static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum bch_watermark watermark,
+ struct closure *cl,
+ struct bch_dev_usage *usage)
+{
+ struct bch_fs *c = trans->c;
+ struct open_bucket *ob = NULL;
+ bool freespace = READ_ONCE(ca->mi.freespace_initialized);
+ u64 avail;
+ struct bucket_alloc_state s = { 0 };
+ bool waiting = false;
+again:
+ bch2_dev_usage_read_fast(ca, usage);
+ avail = dev_buckets_free(ca, *usage, watermark);
+
+ if (usage->d[BCH_DATA_need_discard].buckets > avail)
+ bch2_do_discards(c);
+
+ if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+ bch2_do_gc_gens(c);
+
+ if (should_invalidate_buckets(ca, *usage))
+ bch2_do_invalidates(c);
+
+ if (!avail) {
+ if (cl && !waiting) {
+ closure_wait(&c->freelist_wait, cl);
+ waiting = true;
+ goto again;
+ }
+
+ if (!c->blocked_allocate)
+ c->blocked_allocate = local_clock();
+
+ ob = ERR_PTR(-BCH_ERR_freelist_empty);
+ goto err;
+ }
+
+ if (waiting)
+ closure_wake_up(&c->freelist_wait);
+alloc:
+ ob = likely(freespace)
+ ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+ : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
+
+ if (s.skipped_need_journal_commit * 2 > avail)
+ bch2_journal_flush_async(&c->journal, NULL);
+
+ if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+ freespace = false;
+ goto alloc;
+ }
+err:
+ if (!ob)
+ ob = ERR_PTR(-BCH_ERR_no_buckets_found);
+
+ if (!IS_ERR(ob))
+ trace_and_count(c, bucket_alloc, ca,
+ bch2_watermarks[watermark],
+ ob->bucket,
+ usage->d[BCH_DATA_free].buckets,
+ avail,
+ bch2_copygc_wait_amount(c),
+ c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+ &s,
+ cl == NULL,
+ "");
+ else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
+ trace_and_count(c, bucket_alloc_fail, ca,
+ bch2_watermarks[watermark],
+ 0,
+ usage->d[BCH_DATA_free].buckets,
+ avail,
+ bch2_copygc_wait_amount(c),
+ c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+ &s,
+ cl == NULL,
+ bch2_err_str(PTR_ERR(ob)));
+
+ return ob;
+}
+
+struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_watermark watermark,
+ struct closure *cl)
+{
+ struct bch_dev_usage usage;
+ struct open_bucket *ob;
+
+ bch2_trans_do(c, NULL, NULL, 0,
+ PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
+ cl, &usage)));
+ return ob;
+}
+
+static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
+ unsigned l, unsigned r)
+{
+ return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
+ (stripe->next_alloc[l] < stripe->next_alloc[r]));
+}
+
+#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
+
+struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
+ struct dev_stripe_state *stripe,
+ struct bch_devs_mask *devs)
+{
+ struct dev_alloc_list ret = { .nr = 0 };
+ unsigned i;
+
+ for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
+ ret.devs[ret.nr++] = i;
+
+ bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
+ return ret;
+}
+
+static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
+ struct dev_stripe_state *stripe,
+ struct bch_dev_usage *usage)
+{
+ u64 *v = stripe->next_alloc + ca->dev_idx;
+ u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
+ u64 free_space_inv = free_space
+ ? div64_u64(1ULL << 48, free_space)
+ : 1ULL << 48;
+ u64 scale = *v / 4;
+
+ if (*v + free_space_inv >= *v)
+ *v += free_space_inv;
+ else
+ *v = U64_MAX;
+
+ for (v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
+ *v = *v < scale ? 0 : *v - scale;
+}
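+
+/*
+ * e.g. a device with 1 << 20 free buckets accrues 1 << 28 per allocation
+ * while one with 1 << 10 free buckets accrues 1 << 38, so after the sort in
+ * bch2_dev_alloc_list() the fuller device is picked roughly 1024x more
+ * often; the scale subtraction keeps the counters from pinning at U64_MAX.
+ */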
+
+void bch2_dev_stripe_increment(struct bch_dev *ca,
+ struct dev_stripe_state *stripe)
+{
+ struct bch_dev_usage usage;
+
+ bch2_dev_usage_read_fast(ca, &usage);
+ bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+}
+
+static int add_new_bucket(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ unsigned flags,
+ struct open_bucket *ob)
+{
+ unsigned durability =
+ bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+ BUG_ON(*nr_effective >= nr_replicas);
+ BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
+
+ __clear_bit(ob->dev, devs_may_alloc->d);
+ *nr_effective += (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
+ ? durability : 1;
+ *have_cache |= !durability;
+
+ ob_push(c, ptrs, ob);
+
+ if (*nr_effective >= nr_replicas)
+ return 1;
+ if (ob->ec)
+ return 1;
+ return 0;
+}
+
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
+ struct open_buckets *ptrs,
+ struct dev_stripe_state *stripe,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ unsigned flags,
+ enum bch_data_type data_type,
+ enum bch_watermark watermark,
+ struct closure *cl)
+{
+ struct bch_fs *c = trans->c;
+ struct dev_alloc_list devs_sorted =
+ bch2_dev_alloc_list(c, stripe, devs_may_alloc);
+ unsigned dev;
+ struct bch_dev *ca;
+ int ret = -BCH_ERR_insufficient_devices;
+ unsigned i;
+
+ BUG_ON(*nr_effective >= nr_replicas);
+
+ for (i = 0; i < devs_sorted.nr; i++) {
+ struct bch_dev_usage usage;
+ struct open_bucket *ob;
+
+ dev = devs_sorted.devs[i];
+
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca)
+ percpu_ref_get(&ca->ref);
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ if (!ca->mi.durability && *have_cache) {
+ percpu_ref_put(&ca->ref);
+ continue;
+ }
+
+ ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
+ if (!IS_ERR(ob))
+ bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+ percpu_ref_put(&ca->ref);
+
+ if (IS_ERR(ob)) {
+ ret = PTR_ERR(ob);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
+ break;
+ continue;
+ }
+
+ ob->data_type = data_type;
+
+ if (add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob)) {
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Allocate from stripes: */
+
+/*
+ * if we can't allocate a new stripe because there are already too many
+ * partially filled stripes, force allocating from an existing stripe even when
+ * it's to a device we don't want:
+ */
+
+static int bucket_alloc_from_stripe(struct btree_trans *trans,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ u16 target,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ enum bch_watermark watermark,
+ unsigned flags,
+ struct closure *cl)
+{
+ struct bch_fs *c = trans->c;
+ struct dev_alloc_list devs_sorted;
+ struct ec_stripe_head *h;
+ struct open_bucket *ob;
+ unsigned i, ec_idx;
+ int ret = 0;
+
+ if (nr_replicas < 2)
+ return 0;
+
+ if (ec_open_bucket(c, ptrs))
+ return 0;
+
+ h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
+ if (IS_ERR(h))
+ return PTR_ERR(h);
+ if (!h)
+ return 0;
+
+ devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
+
+ for (i = 0; i < devs_sorted.nr; i++)
+ for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
+ if (!h->s->blocks[ec_idx])
+ continue;
+
+ ob = c->open_buckets + h->s->blocks[ec_idx];
+ if (ob->dev == devs_sorted.devs[i] &&
+ !test_and_set_bit(ec_idx, h->s->blocks_allocated))
+ goto got_bucket;
+ }
+ goto out_put_head;
+got_bucket:
+ ob->ec_idx = ec_idx;
+ ob->ec = h->s;
+ ec_stripe_new_get(h->s, STRIPE_REF_io);
+
+ ret = add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob);
+out_put_head:
+ bch2_ec_stripe_head_put(c, h);
+ return ret;
+}
+
+/* Sector allocator */
+
+static bool want_bucket(struct bch_fs *c,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ bool *have_cache, bool ec,
+ struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+ if (!test_bit(ob->dev, devs_may_alloc->d))
+ return false;
+
+ if (ob->data_type != wp->data_type)
+ return false;
+
+ if (!ca->mi.durability &&
+ (wp->data_type == BCH_DATA_btree || ec || *have_cache))
+ return false;
+
+ if (ec != (ob->ec != NULL))
+ return false;
+
+ return true;
+}
+
+static int bucket_alloc_set_writepoint(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ bool ec, unsigned flags)
+{
+ struct open_buckets ptrs_skip = { .nr = 0 };
+ struct open_bucket *ob;
+ unsigned i;
+ int ret = 0;
+
+ open_bucket_for_each(c, &wp->ptrs, ob, i) {
+ if (!ret && want_bucket(c, wp, devs_may_alloc,
+ have_cache, ec, ob))
+ ret = add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob);
+ else
+ ob_push(c, &ptrs_skip, ob);
+ }
+ wp->ptrs = ptrs_skip;
+
+ return ret;
+}
+
+static int bucket_alloc_set_partial(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache, bool ec,
+ enum bch_watermark watermark,
+ unsigned flags)
+{
+ int i, ret = 0;
+
+ if (!c->open_buckets_partial_nr)
+ return 0;
+
+ spin_lock(&c->freelist_lock);
+
+ if (!c->open_buckets_partial_nr)
+ goto unlock;
+
+ for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
+ struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
+
+ if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev_usage usage;
+ u64 avail;
+
+ bch2_dev_usage_read_fast(ca, &usage);
+ avail = dev_buckets_free(ca, usage, watermark);
+ if (!avail)
+ continue;
+
+ array_remove_item(c->open_buckets_partial,
+ c->open_buckets_partial_nr,
+ i);
+ ob->on_partial_list = false;
+
+ ret = add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_replicas, nr_effective,
+ have_cache, flags, ob);
+ if (ret)
+ break;
+ }
+ }
+unlock:
+ spin_unlock(&c->freelist_lock);
+ return ret;
+}
+
+static int __open_bucket_add_buckets(struct btree_trans *trans,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_list *devs_have,
+ u16 target,
+ bool erasure_code,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ enum bch_watermark watermark,
+ unsigned flags,
+ struct closure *_cl)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_devs_mask devs;
+ struct open_bucket *ob;
+ struct closure *cl = NULL;
+ unsigned i;
+ int ret;
+
+ devs = target_rw_devs(c, wp->data_type, target);
+
+ /* Don't allocate from devices we already have pointers to: */
+ for (i = 0; i < devs_have->nr; i++)
+ __clear_bit(devs_have->devs[i], devs.d);
+
+ open_bucket_for_each(c, ptrs, ob, i)
+ __clear_bit(ob->dev, devs.d);
+
+ if (erasure_code && ec_open_bucket(c, ptrs))
+ return 0;
+
+ ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
+ nr_replicas, nr_effective,
+ have_cache, erasure_code, flags);
+ if (ret)
+ return ret;
+
+ ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
+ nr_replicas, nr_effective,
+ have_cache, erasure_code, watermark, flags);
+ if (ret)
+ return ret;
+
+ if (erasure_code) {
+ ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
+ target,
+ nr_replicas, nr_effective,
+ have_cache,
+ watermark, flags, _cl);
+ } else {
+retry_blocking:
+ /*
+ * Try nonblocking first, so that if one device is full we'll try from
+ * other devices:
+ */
+ ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
+ nr_replicas, nr_effective, have_cache,
+ flags, wp->data_type, watermark, cl);
+ if (ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+ !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
+ !cl && _cl) {
+ cl = _cl;
+ goto retry_blocking;
+ }
+ }
+
+ return ret;
+}
+
+static int open_bucket_add_buckets(struct btree_trans *trans,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_list *devs_have,
+ u16 target,
+ unsigned erasure_code,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ enum bch_watermark watermark,
+ unsigned flags,
+ struct closure *cl)
+{
+ int ret;
+
+ if (erasure_code) {
+ ret = __open_bucket_add_buckets(trans, ptrs, wp,
+ devs_have, target, erasure_code,
+ nr_replicas, nr_effective, have_cache,
+ watermark, flags, cl);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+ bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
+ bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ return ret;
+ if (*nr_effective >= nr_replicas)
+ return 0;
+ }
+
+ ret = __open_bucket_add_buckets(trans, ptrs, wp,
+ devs_have, target, false,
+ nr_replicas, nr_effective, have_cache,
+ watermark, flags, cl);
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * should_drop_bucket - check if this open_bucket should go away
+ * @ob:	open_bucket to check
+ * @c:	filesystem handle
+ * @ca:	if set, we're killing buckets for a particular device
+ * @ec:	if true, we're shutting down erasure coding and killing all ec
+ *	open_buckets
+ * Returns: true if we should kill this open_bucket
+ *
+ * We're killing open_buckets because we're shutting down a device, erasure
+ * coding, or the entire filesystem - check if this open_bucket matches:
+ */
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+ struct bch_dev *ca, bool ec)
+{
+ if (ec) {
+ return ob->ec != NULL;
+ } else if (ca) {
+ bool drop = ob->dev == ca->dev_idx;
+ struct open_bucket *ob2;
+ unsigned i;
+
+ if (!drop && ob->ec) {
+ unsigned nr_blocks;
+
+ mutex_lock(&ob->ec->lock);
+ nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+ for (i = 0; i < nr_blocks; i++) {
+ if (!ob->ec->blocks[i])
+ continue;
+
+ ob2 = c->open_buckets + ob->ec->blocks[i];
+ drop |= ob2->dev == ca->dev_idx;
+ }
+ mutex_unlock(&ob->ec->lock);
+ }
+
+ return drop;
+ } else {
+ return true;
+ }
+}
+
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+ bool ec, struct write_point *wp)
+{
+ struct open_buckets ptrs = { .nr = 0 };
+ struct open_bucket *ob;
+ unsigned i;
+
+ mutex_lock(&wp->lock);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ if (should_drop_bucket(ob, c, ca, ec))
+ bch2_open_bucket_put(c, ob);
+ else
+ ob_push(c, &ptrs, ob);
+ wp->ptrs = ptrs;
+ mutex_unlock(&wp->lock);
+}
+
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+ bool ec)
+{
+ unsigned i;
+
+ /* Next, close write points that point to this device... */
+ for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
+ bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
+
+ bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
+ bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
+ bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
+
+ mutex_lock(&c->btree_reserve_cache_lock);
+ while (c->btree_reserve_cache_nr) {
+ struct btree_alloc *a =
+ &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+ bch2_open_buckets_put(c, &a->ob);
+ }
+ mutex_unlock(&c->btree_reserve_cache_lock);
+
+ spin_lock(&c->freelist_lock);
+ i = 0;
+ while (i < c->open_buckets_partial_nr) {
+ struct open_bucket *ob =
+ c->open_buckets + c->open_buckets_partial[i];
+
+ if (should_drop_bucket(ob, c, ca, ec)) {
+ --c->open_buckets_partial_nr;
+ swap(c->open_buckets_partial[i],
+ c->open_buckets_partial[c->open_buckets_partial_nr]);
+ ob->on_partial_list = false;
+ spin_unlock(&c->freelist_lock);
+ bch2_open_bucket_put(c, ob);
+ spin_lock(&c->freelist_lock);
+ } else {
+ i++;
+ }
+ }
+ spin_unlock(&c->freelist_lock);
+
+ bch2_ec_stop_dev(c, ca);
+}
+
+static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
+ unsigned long write_point)
+{
+ unsigned hash =
+ hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
+
+ return &c->write_points_hash[hash];
+}
+
+static struct write_point *__writepoint_find(struct hlist_head *head,
+ unsigned long write_point)
+{
+ struct write_point *wp;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(wp, head, node)
+ if (wp->write_point == write_point)
+ goto out;
+ wp = NULL;
+out:
+ rcu_read_unlock();
+ return wp;
+}
+
+static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
+{
+ u64 stranded = c->write_points_nr * c->bucket_size_max;
+ u64 free = bch2_fs_usage_read_short(c).free;
+
+ return stranded * factor > free;
+}
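+
+/*
+ * e.g. 32 write points each potentially stranding a 1024 sector bucket is
+ * 32768 sectors; with factor 32 we refuse new write points once free space
+ * drops below ~1M sectors.
+ */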
+
+static bool try_increase_writepoints(struct bch_fs *c)
+{
+ struct write_point *wp;
+
+ if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
+ too_many_writepoints(c, 32))
+ return false;
+
+ wp = c->write_points + c->write_points_nr++;
+ hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
+ return true;
+}
+
+static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
+{
+ struct bch_fs *c = trans->c;
+ struct write_point *wp;
+ struct open_bucket *ob;
+ unsigned i;
+
+ mutex_lock(&c->write_points_hash_lock);
+ if (c->write_points_nr < old_nr) {
+ mutex_unlock(&c->write_points_hash_lock);
+ return true;
+ }
+
+ if (c->write_points_nr == 1 ||
+ !too_many_writepoints(c, 8)) {
+ mutex_unlock(&c->write_points_hash_lock);
+ return false;
+ }
+
+ wp = c->write_points + --c->write_points_nr;
+
+ hlist_del_rcu(&wp->node);
+ mutex_unlock(&c->write_points_hash_lock);
+
+ bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ open_bucket_free_unused(c, ob);
+ wp->ptrs.nr = 0;
+ mutex_unlock(&wp->lock);
+ return true;
+}
+
+static struct write_point *writepoint_find(struct btree_trans *trans,
+ unsigned long write_point)
+{
+ struct bch_fs *c = trans->c;
+ struct write_point *wp, *oldest;
+ struct hlist_head *head;
+
+ if (!(write_point & 1UL)) {
+ wp = (struct write_point *) write_point;
+ bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+ return wp;
+ }
+
+ head = writepoint_hash(c, write_point);
+restart_find:
+ wp = __writepoint_find(head, write_point);
+ if (wp) {
+lock_wp:
+ bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+ if (wp->write_point == write_point)
+ goto out;
+ mutex_unlock(&wp->lock);
+ goto restart_find;
+ }
+restart_find_oldest:
+ oldest = NULL;
+ for (wp = c->write_points;
+ wp < c->write_points + c->write_points_nr; wp++)
+ if (!oldest || time_before64(wp->last_used, oldest->last_used))
+ oldest = wp;
+
+ bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
+ bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
+ if (oldest >= c->write_points + c->write_points_nr ||
+ try_increase_writepoints(c)) {
+ mutex_unlock(&c->write_points_hash_lock);
+ mutex_unlock(&oldest->lock);
+ goto restart_find_oldest;
+ }
+
+ wp = __writepoint_find(head, write_point);
+ if (wp && wp != oldest) {
+ mutex_unlock(&c->write_points_hash_lock);
+ mutex_unlock(&oldest->lock);
+ goto lock_wp;
+ }
+
+ wp = oldest;
+ hlist_del_rcu(&wp->node);
+ wp->write_point = write_point;
+ hlist_add_head_rcu(&wp->node, head);
+ mutex_unlock(&c->write_points_hash_lock);
+out:
+ wp->last_used = local_clock();
+ return wp;
+}
+
+/*
+ * Get us an open_bucket we can allocate from, return with it locked:
+ */
+int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
+ unsigned target,
+ unsigned erasure_code,
+ struct write_point_specifier write_point,
+ struct bch_devs_list *devs_have,
+ unsigned nr_replicas,
+ unsigned nr_replicas_required,
+ enum bch_watermark watermark,
+ unsigned flags,
+ struct closure *cl,
+ struct write_point **wp_ret)
+{
+ struct bch_fs *c = trans->c;
+ struct write_point *wp;
+ struct open_bucket *ob;
+ struct open_buckets ptrs;
+ unsigned nr_effective, write_points_nr;
+ bool have_cache;
+ int ret;
+ int i;
+
+ BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
+
+ BUG_ON(!nr_replicas || !nr_replicas_required);
+retry:
+ ptrs.nr = 0;
+ nr_effective = 0;
+ write_points_nr = c->write_points_nr;
+ have_cache = false;
+
+ *wp_ret = wp = writepoint_find(trans, write_point.v);
+
+ /* metadata may not allocate on cache devices: */
+ if (wp->data_type != BCH_DATA_user)
+ have_cache = true;
+
+ if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
+ ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ target, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, watermark,
+ flags, NULL);
+ if (!ret ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto alloc_done;
+
+ /* Don't retry from all devices if we're out of open buckets: */
+ if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+ goto allocate_blocking;
+
+ /*
+ * Only try to allocate cache (durability = 0 devices) from the
+ * specified target:
+ */
+ have_cache = true;
+
+ ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ 0, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, watermark,
+ flags, cl);
+ } else {
+allocate_blocking:
+ ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+ target, erasure_code,
+ nr_replicas, &nr_effective,
+ &have_cache, watermark,
+ flags, cl);
+ }
+alloc_done:
+ BUG_ON(!ret && nr_effective < nr_replicas);
+
+ if (erasure_code && !ec_open_bucket(c, &ptrs))
+		pr_debug("failed to get ec bucket: ret %i", ret);
+
+ if (ret == -BCH_ERR_insufficient_devices &&
+ nr_effective >= nr_replicas_required)
+ ret = 0;
+
+ if (ret)
+ goto err;
+
+ /* Free buckets we didn't use: */
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ open_bucket_free_unused(c, ob);
+
+ wp->ptrs = ptrs;
+
+ wp->sectors_free = UINT_MAX;
+
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
+
+ BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
+
+ return 0;
+err:
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ if (ptrs.nr < ARRAY_SIZE(ptrs.v))
+ ob_push(c, &ptrs, ob);
+ else
+ open_bucket_free_unused(c, ob);
+ wp->ptrs = ptrs;
+
+ mutex_unlock(&wp->lock);
+
+ if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
+ try_decrease_writepoints(trans, write_points_nr))
+ goto retry;
+
+ if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
+ bch2_err_matches(ret, BCH_ERR_freelist_empty))
+ return cl
+ ? -BCH_ERR_bucket_alloc_blocked
+ : -BCH_ERR_ENOSPC_bucket_alloc;
+
+ return ret;
+}
+
+struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+ return (struct bch_extent_ptr) {
+ .type = 1 << BCH_EXTENT_ENTRY_ptr,
+ .gen = ob->gen,
+ .dev = ob->dev,
+ .offset = bucket_to_sector(ca, ob->bucket) +
+ ca->mi.bucket_size -
+ ob->sectors_free,
+ };
+}
+
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
+ struct bkey_i *k, unsigned sectors,
+ bool cached)
+{
+ bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
+}
+
+/*
+ * Finish an allocation: unlock @wp, and drop our references to any open
+ * buckets that now have no space left
+ */
+void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
+{
+ bch2_alloc_sectors_done_inlined(c, wp);
+}
+
+static inline void writepoint_init(struct write_point *wp,
+ enum bch_data_type type)
+{
+ mutex_init(&wp->lock);
+ wp->data_type = type;
+
+ INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
+ INIT_LIST_HEAD(&wp->writes);
+ spin_lock_init(&wp->writes_lock);
+}
+
+void bch2_fs_allocator_foreground_init(struct bch_fs *c)
+{
+ struct open_bucket *ob;
+ struct write_point *wp;
+
+ mutex_init(&c->write_points_hash_lock);
+ c->write_points_nr = ARRAY_SIZE(c->write_points);
+
+	/* open bucket 0 is a sentinel NULL: */
+ spin_lock_init(&c->open_buckets[0].lock);
+
+ for (ob = c->open_buckets + 1;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
+ spin_lock_init(&ob->lock);
+ c->open_buckets_nr_free++;
+
+ ob->freelist = c->open_buckets_freelist;
+ c->open_buckets_freelist = ob - c->open_buckets;
+ }
+
+ writepoint_init(&c->btree_write_point, BCH_DATA_btree);
+ writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
+ writepoint_init(&c->copygc_write_point, BCH_DATA_user);
+
+ for (wp = c->write_points;
+ wp < c->write_points + c->write_points_nr; wp++) {
+ writepoint_init(wp, BCH_DATA_user);
+
+ wp->last_used = local_clock();
+ wp->write_point = (unsigned long) wp;
+ hlist_add_head_rcu(&wp->node,
+ writepoint_hash(c, wp->write_point));
+ }
+}
+
+static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ unsigned data_type = ob->data_type;
+ barrier(); /* READ_ONCE() doesn't work on bitfields */
+
+ prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
+ ob - c->open_buckets,
+ atomic_read(&ob->pin),
+ data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
+ ob->dev, ob->bucket, ob->gen,
+ ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
+ if (ob->ec)
+ prt_printf(out, " ec idx %llu", ob->ec->idx);
+ if (ob->on_partial_list)
+ prt_str(out, " partial");
+ prt_newline(out);
+}
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct open_bucket *ob;
+
+ out->atomic++;
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ spin_lock(&ob->lock);
+ if (ob->valid && !ob->on_partial_list)
+ bch2_open_bucket_to_text(out, c, ob);
+ spin_unlock(&ob->lock);
+ }
+
+ --out->atomic;
+}
+
+void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ unsigned i;
+
+ out->atomic++;
+ spin_lock(&c->freelist_lock);
+
+ for (i = 0; i < c->open_buckets_partial_nr; i++)
+ bch2_open_bucket_to_text(out, c,
+ c->open_buckets + c->open_buckets_partial[i]);
+
+ spin_unlock(&c->freelist_lock);
+ --out->atomic;
+}
+
+static const char * const bch2_write_point_states[] = {
+#define x(n) #n,
+ WRITE_POINT_STATES()
+#undef x
+ NULL
+};
+
+static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
+ struct write_point *wp)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ prt_printf(out, "%lu: ", wp->write_point);
+ prt_human_readable_u64(out, wp->sectors_allocated);
+
+ prt_printf(out, " last wrote: ");
+ bch2_pr_time_units(out, sched_clock() - wp->last_used);
+
+ for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
+ prt_printf(out, " %s: ", bch2_write_point_states[i]);
+ bch2_pr_time_units(out, wp->time[i]);
+ }
+
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ bch2_open_bucket_to_text(out, c, ob);
+ printbuf_indent_sub(out, 2);
+}
+
+void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct write_point *wp;
+
+ prt_str(out, "Foreground write points\n");
+ for (wp = c->write_points;
+ wp < c->write_points + ARRAY_SIZE(c->write_points);
+ wp++)
+ bch2_write_point_to_text(out, c, wp);
+
+ prt_str(out, "Copygc write point\n");
+ bch2_write_point_to_text(out, c, &c->copygc_write_point);
+
+ prt_str(out, "Rebalance write point\n");
+ bch2_write_point_to_text(out, c, &c->rebalance_write_point);
+
+ prt_str(out, "Btree write point\n");
+ bch2_write_point_to_text(out, c, &c->btree_write_point);
+}
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
new file mode 100644
index 000000000000..7aaeec44c746
--- /dev/null
+++ b/fs/bcachefs/alloc_foreground.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ALLOC_FOREGROUND_H
+#define _BCACHEFS_ALLOC_FOREGROUND_H
+
+#include "bcachefs.h"
+#include "alloc_types.h"
+#include "extents.h"
+#include "sb-members.h"
+
+#include <linux/hash.h>
+
+struct bkey;
+struct bch_dev;
+struct bch_fs;
+struct bch_devs_list;
+
+extern const char * const bch2_watermarks[];
+
+void bch2_reset_alloc_cursors(struct bch_fs *);
+
+struct dev_alloc_list {
+ unsigned nr;
+ u8 devs[BCH_SB_MEMBERS_MAX];
+};
+
+struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
+ struct dev_stripe_state *,
+ struct bch_devs_mask *);
+void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
+
+long bch2_bucket_alloc_new_fs(struct bch_dev *);
+
+struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
+ enum bch_watermark, struct closure *);
+
+static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
+ struct open_bucket *ob)
+{
+ BUG_ON(obs->nr >= ARRAY_SIZE(obs->v));
+
+ obs->v[obs->nr++] = ob - c->open_buckets;
+}
+
+#define open_bucket_for_each(_c, _obs, _ob, _i) \
+ for ((_i) = 0; \
+ (_i) < (_obs)->nr && \
+ ((_ob) = (_c)->open_buckets + (_obs)->v[_i], true); \
+ (_i)++)
+
+static inline struct open_bucket *ec_open_bucket(struct bch_fs *c,
+ struct open_buckets *obs)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, obs, ob, i)
+ if (ob->ec)
+ return ob;
+
+ return NULL;
+}
+
+void bch2_open_bucket_write_error(struct bch_fs *,
+ struct open_buckets *, unsigned);
+
+void __bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
+
+static inline void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
+{
+ if (atomic_dec_and_test(&ob->pin))
+ __bch2_open_bucket_put(c, ob);
+}
+
+static inline void bch2_open_buckets_put(struct bch_fs *c,
+ struct open_buckets *ptrs)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, ptrs, ob, i)
+ bch2_open_bucket_put(c, ob);
+ ptrs->nr = 0;
+}
+
+static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
+{
+ struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
+ wp->ptrs = keep;
+
+ mutex_unlock(&wp->lock);
+
+ bch2_open_buckets_put(c, &ptrs);
+}
+
+static inline void bch2_open_bucket_get(struct bch_fs *c,
+ struct write_point *wp,
+ struct open_buckets *ptrs)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, &wp->ptrs, ob, i) {
+ ob->data_type = wp->data_type;
+ atomic_inc(&ob->pin);
+ ob_push(c, ptrs, ob);
+ }
+}
+
+static inline open_bucket_idx_t *open_bucket_hashslot(struct bch_fs *c,
+ unsigned dev, u64 bucket)
+{
+ return c->open_buckets_hash +
+ (jhash_3words(dev, bucket, bucket >> 32, 0) &
+ (OPEN_BUCKETS_COUNT - 1));
+}
+
+static inline bool bch2_bucket_is_open(struct bch_fs *c, unsigned dev, u64 bucket)
+{
+ open_bucket_idx_t slot = *open_bucket_hashslot(c, dev, bucket);
+
+ while (slot) {
+ struct open_bucket *ob = &c->open_buckets[slot];
+
+ if (ob->dev == dev && ob->bucket == bucket)
+ return true;
+
+ slot = ob->hash;
+ }
+
+ return false;
+}
+
+static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64 bucket)
+{
+ bool ret;
+
+ if (bch2_bucket_is_open(c, dev, bucket))
+ return true;
+
+ spin_lock(&c->freelist_lock);
+ ret = bch2_bucket_is_open(c, dev, bucket);
+ spin_unlock(&c->freelist_lock);
+
+ return ret;
+}
+
+int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
+ struct dev_stripe_state *, struct bch_devs_mask *,
+ unsigned, unsigned *, bool *, unsigned,
+ enum bch_data_type, enum bch_watermark,
+ struct closure *);
+
+int bch2_alloc_sectors_start_trans(struct btree_trans *,
+ unsigned, unsigned,
+ struct write_point_specifier,
+ struct bch_devs_list *,
+ unsigned, unsigned,
+ enum bch_watermark,
+ unsigned,
+ struct closure *,
+ struct write_point **);
+
+struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);
+
+/*
+ * Append pointers to @k for the space we just allocated, and mark @sectors
+ * as allocated out of @wp's open buckets
+ */
+static inline void
+bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
+ struct bkey_i *k, unsigned sectors,
+ bool cached)
+{
+ struct open_bucket *ob;
+ unsigned i;
+
+ BUG_ON(sectors > wp->sectors_free);
+ wp->sectors_free -= sectors;
+ wp->sectors_allocated += sectors;
+
+ open_bucket_for_each(c, &wp->ptrs, ob, i) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
+
+ ptr.cached = cached ||
+ (!ca->mi.durability &&
+ wp->data_type == BCH_DATA_user);
+
+ bch2_bkey_append_ptr(k, ptr);
+
+ BUG_ON(sectors > ob->sectors_free);
+ ob->sectors_free -= sectors;
+ }
+}
+
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
+ struct bkey_i *, unsigned, bool);
+void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
+
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *, bool);
+
+static inline struct write_point_specifier writepoint_hashed(unsigned long v)
+{
+ return (struct write_point_specifier) { .v = v | 1 };
+}
+
+static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
+{
+ return (struct write_point_specifier) { .v = (unsigned long) wp };
+}
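+
+/*
+ * The two specifier flavours are disambiguated by bit 0: a struct
+ * write_point pointer is at least word aligned so its low bit is always
+ * clear, while writepoint_hashed() always sets it.
+ */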
+
+void bch2_fs_allocator_foreground_init(struct bch_fs *);
+
+void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *);
+void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);
+
+void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);
+
+#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
new file mode 100644
index 000000000000..b91b7a461056
--- /dev/null
+++ b/fs/bcachefs/alloc_types.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ALLOC_TYPES_H
+#define _BCACHEFS_ALLOC_TYPES_H
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+#include "clock_types.h"
+#include "fifo.h"
+
+struct bucket_alloc_state {
+ u64 buckets_seen;
+ u64 skipped_open;
+ u64 skipped_need_journal_commit;
+ u64 skipped_nocow;
+ u64 skipped_nouse;
+};
+
+#define BCH_WATERMARKS() \
+ x(stripe) \
+ x(normal) \
+ x(copygc) \
+ x(btree) \
+ x(btree_copygc) \
+ x(reclaim)
+
+enum bch_watermark {
+#define x(name) BCH_WATERMARK_##name,
+ BCH_WATERMARKS()
+#undef x
+ BCH_WATERMARK_NR,
+};
+
+#define BCH_WATERMARK_BITS 3
+#define BCH_WATERMARK_MASK ~(~0U << BCH_WATERMARK_BITS)
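+
+/*
+ * e.g. with BCH_WATERMARK_BITS == 3, BCH_WATERMARK_MASK is
+ * ~(~0U << 3) == 0x7; three bits comfortably hold all
+ * BCH_WATERMARK_NR == 6 watermarks, so a watermark can be packed into
+ * the low bits of a flags word and recovered by masking.
+ */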
+
+#define OPEN_BUCKETS_COUNT 1024
+
+#define WRITE_POINT_HASH_NR 32
+#define WRITE_POINT_MAX 32
+
+/*
+ * 0 is never a valid open_bucket_idx_t:
+ */
+typedef u16 open_bucket_idx_t;
+
+struct open_bucket {
+ spinlock_t lock;
+ atomic_t pin;
+ open_bucket_idx_t freelist;
+ open_bucket_idx_t hash;
+
+ /*
+ * When an open bucket has an ec_stripe attached, this is the index of
+ * the block in the stripe this open_bucket corresponds to:
+ */
+ u8 ec_idx;
+ enum bch_data_type data_type:6;
+ unsigned valid:1;
+ unsigned on_partial_list:1;
+
+ u8 dev;
+ u8 gen;
+ u32 sectors_free;
+ u64 bucket;
+ struct ec_stripe_new *ec;
+};
+
+#define OPEN_BUCKET_LIST_MAX 15
+
+struct open_buckets {
+ open_bucket_idx_t nr;
+ open_bucket_idx_t v[OPEN_BUCKET_LIST_MAX];
+};
+
+struct dev_stripe_state {
+ u64 next_alloc[BCH_SB_MEMBERS_MAX];
+};
+
+#define WRITE_POINT_STATES() \
+ x(stopped) \
+ x(waiting_io) \
+ x(waiting_work) \
+ x(running)
+
+enum write_point_state {
+#define x(n) WRITE_POINT_##n,
+ WRITE_POINT_STATES()
+#undef x
+ WRITE_POINT_STATE_NR
+};
+
+struct write_point {
+ struct {
+ struct hlist_node node;
+ struct mutex lock;
+ u64 last_used;
+ unsigned long write_point;
+ enum bch_data_type data_type;
+
+ /* calculated based on how many pointers we're actually going to use: */
+ unsigned sectors_free;
+
+ struct open_buckets ptrs;
+ struct dev_stripe_state stripe;
+
+ u64 sectors_allocated;
+ } __aligned(SMP_CACHE_BYTES);
+
+ struct {
+ struct work_struct index_update_work;
+
+ struct list_head writes;
+ spinlock_t writes_lock;
+
+ enum write_point_state state;
+ u64 last_state_change;
+ u64 time[WRITE_POINT_STATE_NR];
+ } __aligned(SMP_CACHE_BYTES);
+};
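+
+/*
+ * The two anonymous structs above are cacheline aligned so that the
+ * fields used by the allocation path and those used by the index update
+ * path live on separate cachelines, avoiding false sharing between
+ * writers and the index update worker.
+ */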
+
+struct write_point_specifier {
+ unsigned long v;
+};
+
+#endif /* _BCACHEFS_ALLOC_TYPES_H */
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
new file mode 100644
index 000000000000..cc856150a948
--- /dev/null
+++ b/fs/bcachefs/backpointers.c
@@ -0,0 +1,868 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "bbpos.h"
+#include "alloc_background.h"
+#include "backpointers.h"
+#include "btree_cache.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
+#include "error.h"
+
+#include <linux/mm.h>
+
+static bool extent_matches_bp(struct bch_fs *c,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k,
+ struct bpos bucket,
+ struct bch_backpointer bp)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ struct bpos bucket2;
+ struct bch_backpointer bp2;
+
+ if (p.ptr.cached)
+ continue;
+
+ bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
+ &bucket2, &bp2);
+ if (bpos_eq(bucket, bucket2) &&
+ !memcmp(&bp, &bp2, sizeof(bp)))
+ return true;
+ }
+
+ return false;
+}
+
+int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
+ struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
+
+ if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
+ prt_str(err, "backpointer at wrong pos");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
+{
+ prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
+ bch2_btree_ids[bp->btree_id],
+ bp->level,
+ (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
+ (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
+ bp->bucket_len);
+ bch2_bpos_to_text(out, bp->pos);
+}
+
+void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ prt_str(out, "bucket=");
+ bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
+ prt_str(out, " ");
+
+ bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
+}
+
+void bch2_backpointer_swab(struct bkey_s k)
+{
+ struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
+
+ bp.v->bucket_offset = swab32(bp.v->bucket_offset);
+ bp.v->bucket_len = swab32(bp.v->bucket_len);
+ bch2_bpos_swab(&bp.v->pos);
+}
+
+static noinline int backpointer_mod_err(struct btree_trans *trans,
+ struct bch_backpointer bp,
+ struct bkey_s_c bp_k,
+ struct bkey_s_c orig_k,
+ bool insert)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+
+ if (insert) {
+ prt_printf(&buf, "existing backpointer found when inserting ");
+ bch2_backpointer_to_text(&buf, &bp);
+ prt_newline(&buf);
+ printbuf_indent_add(&buf, 2);
+
+ prt_printf(&buf, "found ");
+ bch2_bkey_val_to_text(&buf, c, bp_k);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "for ");
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+
+ bch_err(c, "%s", buf.buf);
+ } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
+ prt_printf(&buf, "backpointer not found when deleting");
+ prt_newline(&buf);
+ printbuf_indent_add(&buf, 2);
+
+ prt_printf(&buf, "searching for ");
+ bch2_backpointer_to_text(&buf, &bp);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "got ");
+ bch2_bkey_val_to_text(&buf, c, bp_k);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "for ");
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+
+ bch_err(c, "%s", buf.buf);
+ }
+
+ printbuf_exit(&buf);
+
+ if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
+ bch2_inconsistent_error(c);
+ return -EIO;
+ } else {
+ return 0;
+ }
+}
+
+int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
+ struct bkey_i_backpointer *bp_k,
+ struct bch_backpointer bp,
+ struct bkey_s_c orig_k,
+ bool insert)
+{
+ struct btree_iter bp_iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
+ bp_k->k.p,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_SLOTS|
+ BTREE_ITER_WITH_UPDATES);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (insert
+ ? k.k->type
+ : (k.k->type != KEY_TYPE_backpointer ||
+ memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
+ ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
+ if (ret)
+ goto err;
+ }
+
+ ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
+err:
+ bch2_trans_iter_exit(trans, &bp_iter);
+ return ret;
+}
+
+/*
+ * Find the next backpointer >= *bp_pos:
+ */
+int bch2_get_next_backpointer(struct btree_trans *trans,
+ struct bpos bucket, int gen,
+ struct bpos *bp_pos,
+ struct bch_backpointer *bp,
+ unsigned iter_flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
+ struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
+ struct bkey_s_c k;
+ int ret = 0;
+
+ if (bpos_ge(*bp_pos, bp_end_pos))
+ goto done;
+
+ if (gen >= 0) {
+ k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
+ bucket, BTREE_ITER_CACHED|iter_flags);
+ ret = bkey_err(k);
+ if (ret)
+ goto out;
+
+ if (k.k->type != KEY_TYPE_alloc_v4 ||
+ bkey_s_c_to_alloc_v4(k).v->gen != gen)
+ goto done;
+ }
+
+ *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));
+
+ for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
+ *bp_pos, iter_flags, k, ret) {
+ if (bpos_ge(k.k->p, bp_end_pos))
+ break;
+
+ *bp_pos = k.k->p;
+ *bp = *bkey_s_c_to_backpointer(k).v;
+ goto out;
+ }
+done:
+ *bp_pos = SPOS_MAX;
+out:
+ bch2_trans_iter_exit(trans, &bp_iter);
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ return ret;
+}
+
+static void backpointer_not_found(struct btree_trans *trans,
+ struct bpos bp_pos,
+ struct bch_backpointer bp,
+ struct bkey_s_c k,
+ const char *thing_it_points_to)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
+
+ if (likely(!bch2_backpointers_no_use_write_buffer))
+ return;
+
+ prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
+ thing_it_points_to);
+ prt_printf(&buf, "bucket: ");
+ bch2_bpos_to_text(&buf, bucket);
+ prt_printf(&buf, "\n ");
+
+ prt_printf(&buf, "backpointer pos: ");
+ bch2_bpos_to_text(&buf, bp_pos);
+ prt_printf(&buf, "\n ");
+
+ bch2_backpointer_to_text(&buf, &bp);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
+ if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
+ bch_err_ratelimited(c, "%s", buf.buf);
+ else
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+
+ printbuf_exit(&buf);
+}
+
+struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos bp_pos,
+ struct bch_backpointer bp,
+ unsigned iter_flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_root *r = bch2_btree_id_root(c, bp.btree_id);
+ struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
+ struct bkey_s_c k;
+
+ bch2_trans_node_iter_init(trans, iter,
+ bp.btree_id,
+ bp.pos,
+ 0,
+ min(bp.level, r->level),
+ iter_flags);
+ k = bch2_btree_iter_peek_slot(iter);
+ if (bkey_err(k)) {
+ bch2_trans_iter_exit(trans, iter);
+ return k;
+ }
+
+ if (bp.level == r->level + 1)
+ k = bkey_i_to_s_c(&r->key);
+
+ if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
+ return k;
+
+ bch2_trans_iter_exit(trans, iter);
+
+ if (unlikely(bch2_backpointers_no_use_write_buffer)) {
+ if (bp.level) {
+ struct btree *b;
+
+ /*
+ * If a backpointer for a btree node wasn't found, it may be
+ * because it was overwritten by a new btree node that hasn't
+ * been written out yet - backpointer_get_node() checks for
+ * this:
+ */
+ b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
+ if (!IS_ERR_OR_NULL(b))
+ return bkey_i_to_s_c(&b->key);
+
+ bch2_trans_iter_exit(trans, iter);
+
+ if (IS_ERR(b))
+ return bkey_s_c_err(PTR_ERR(b));
+ return bkey_s_c_null;
+ }
+
+ backpointer_not_found(trans, bp_pos, bp, k, "extent");
+ }
+
+ return bkey_s_c_null;
+}
+
+struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos bp_pos,
+ struct bch_backpointer bp)
+{
+ struct bch_fs *c = trans->c;
+ struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
+ struct btree *b;
+
+ BUG_ON(!bp.level);
+
+ bch2_trans_node_iter_init(trans, iter,
+ bp.btree_id,
+ bp.pos,
+ 0,
+ bp.level - 1,
+ 0);
+ b = bch2_btree_iter_peek_node(iter);
+ if (IS_ERR(b))
+ goto err;
+
+ if (b && extent_matches_bp(c, bp.btree_id, bp.level,
+ bkey_i_to_s_c(&b->key),
+ bucket, bp))
+ return b;
+
+ if (b && btree_node_will_make_reachable(b)) {
+ b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
+ } else {
+ if (b)
+ backpointer_not_found(trans, bp_pos, bp,
+ bkey_i_to_s_c(&b->key), "btree node");
+ b = NULL;
+ }
+err:
+ bch2_trans_iter_exit(trans, iter);
+ return b;
+}
+
+static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter alloc_iter = { NULL };
+ struct bkey_s_c alloc_k;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
+ "backpointer for missing device:\n%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, bp_iter, 0);
+ goto out;
+ }
+
+ alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
+ bp_pos_to_bucket(c, k.k->p), 0);
+ ret = bkey_err(alloc_k);
+ if (ret)
+ goto out;
+
+ if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
+ "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
+ alloc_iter.pos.inode, alloc_iter.pos.offset,
+ (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, bp_iter, 0);
+ goto out;
+ }
+out:
+fsck_err:
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+/* verify that every backpointer has a corresponding alloc key */
+int bch2_check_btree_backpointers(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_backpointers, POS_MIN, 0, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ bch2_check_btree_backpointer(trans, &iter, k)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+struct bpos_level {
+ unsigned level;
+ struct bpos pos;
+};
+
+static int check_bp_exists(struct btree_trans *trans,
+ struct bpos bucket,
+ struct bch_backpointer bp,
+ struct bkey_s_c orig_k,
+ struct bpos bucket_start,
+ struct bpos bucket_end,
+ struct bpos_level *last_flushed)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter bp_iter = { NULL };
+ struct printbuf buf = PRINTBUF;
+ struct bkey_s_c bp_k;
+ int ret;
+
+ if (bpos_lt(bucket, bucket_start) ||
+ bpos_gt(bucket, bucket_end))
+ return 0;
+
+ if (!bch2_dev_bucket_exists(c, bucket))
+ goto missing;
+
+ bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
+ bucket_pos_to_bp(c, bucket, bp.bucket_offset),
+ 0);
+ ret = bkey_err(bp_k);
+ if (ret)
+ goto err;
+
+ if (bp_k.k->type != KEY_TYPE_backpointer ||
+ memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
+ if (last_flushed->level != bp.level ||
+ !bpos_eq(last_flushed->pos, orig_k.k->p)) {
+ last_flushed->level = bp.level;
+ last_flushed->pos = orig_k.k->p;
+
+ ret = bch2_btree_write_buffer_flush_sync(trans) ?:
+ -BCH_ERR_transaction_restart_write_buffer_flush;
+ goto out;
+ }
+ goto missing;
+ }
+out:
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &bp_iter);
+ printbuf_exit(&buf);
+ return ret;
+missing:
+ prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
+ bch2_btree_ids[bp.btree_id], bp.level);
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ prt_printf(&buf, "\nbp pos ");
+ bch2_bpos_to_text(&buf, bp_iter.pos);
+
+ if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
+ c->opts.reconstruct_alloc ||
+ fsck_err(c, "%s", buf.buf))
+ ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
+
+ goto out;
+}
+
+static int check_extent_to_backpointers(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos bucket_start,
+ struct bpos bucket_end,
+ struct bpos_level *last_flushed)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs;
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_btree_iter_peek_all_levels(iter);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+ if (!k.k)
+ return 0;
+
+ ptrs = bch2_bkey_ptrs_c(k);
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ struct bpos bucket_pos;
+ struct bch_backpointer bp;
+
+ if (p.ptr.cached)
+ continue;
+
+ bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
+ k, p, &bucket_pos, &bp);
+
+ ret = check_bp_exists(trans, bucket_pos, bp, k,
+ bucket_start, bucket_end,
+ last_flushed);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int check_btree_root_to_backpointers(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bpos bucket_start,
+ struct bpos bucket_end,
+ struct bpos_level *last_flushed)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_root *r = bch2_btree_id_root(c, btree_id);
+ struct btree_iter iter;
+ struct btree *b;
+ struct bkey_s_c k;
+ struct bkey_ptrs_c ptrs;
+ struct extent_ptr_decoded p;
+ const union bch_extent_entry *entry;
+ int ret;
+
+ bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, r->level, 0);
+ b = bch2_btree_iter_peek_node(&iter);
+ ret = PTR_ERR_OR_ZERO(b);
+ if (ret)
+ goto err;
+
+ BUG_ON(b != btree_node_root(c, b));
+
+ k = bkey_i_to_s_c(&b->key);
+ ptrs = bch2_bkey_ptrs_c(k);
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ struct bpos bucket_pos;
+ struct bch_backpointer bp;
+
+ if (p.ptr.cached)
+ continue;
+
+ bch2_extent_ptr_to_bp(c, iter.btree_id, b->c.level + 1,
+ k, p, &bucket_pos, &bp);
+
+ ret = check_bp_exists(trans, bucket_pos, bp, k,
+ bucket_start, bucket_end,
+ last_flushed);
+ if (ret)
+ goto err;
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
+{
+ return (struct bbpos) {
+ .btree = bp.btree_id,
+ .pos = bp.pos,
+ };
+}
+
+static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
+{
+ struct sysinfo i;
+ u64 mem_bytes;
+
+ si_meminfo(&i);
+ mem_bytes = i.totalram * i.mem_unit;
+ return div_u64(mem_bytes >> 1, btree_bytes(c));
+}
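+
+/*
+ * e.g. with 16GiB of RAM and 256KiB btree nodes (hypothetical numbers),
+ * we'd budget half of memory and get 32768 nodes per pass.
+ */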
+
+static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
+ unsigned btree_leaf_mask,
+ unsigned btree_interior_mask,
+ struct bbpos start, struct bbpos *end)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
+ enum btree_id btree;
+ int ret = 0;
+
+ for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
+ unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;
+
+ if (!((1U << btree) & btree_leaf_mask) &&
+ !((1U << btree) & btree_interior_mask))
+ continue;
+
+ bch2_trans_node_iter_init(trans, &iter, btree,
+ btree == start.btree ? start.pos : POS_MIN,
+ 0, depth, 0);
+ /*
+ * for_each_btree_key_continue() doesn't check the return value
+ * from bch2_btree_iter_advance(), which is needed when
+ * iterating over interior nodes where we'll see keys at
+ * SPOS_MAX:
+ */
+ do {
+ k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
+ ret = bkey_err(k);
+ if (!k.k || ret)
+ break;
+
+ --btree_nodes;
+ if (!btree_nodes) {
+ *end = BBPOS(btree, k.k->p);
+ bch2_trans_iter_exit(trans, &iter);
+ return 0;
+ }
+ } while (bch2_btree_iter_advance(&iter));
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ *end = BBPOS_MAX;
+ return ret;
+}
+
+static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
+ struct bpos bucket_start,
+ struct bpos bucket_end)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ enum btree_id btree_id;
+ struct bpos_level last_flushed = { UINT_MAX, POS_MIN };
+ int ret = 0;
+
+ for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
+ unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
+
+ bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
+ depth,
+ BTREE_ITER_ALL_LEVELS|
+ BTREE_ITER_PREFETCH);
+
+ do {
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL,
+ check_extent_to_backpointers(trans, &iter,
+ bucket_start, bucket_end,
+ &last_flushed));
+ if (ret)
+ break;
+ } while (!bch2_btree_iter_advance(&iter));
+
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ break;
+
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL,
+ check_btree_root_to_backpointers(trans, btree_id,
+ bucket_start, bucket_end,
+ &last_flushed));
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
+ struct bpos bucket)
+{
+ return bch2_dev_exists2(c, bucket.inode)
+ ? bucket_pos_to_bp(c, bucket, 0)
+ : bucket;
+}
+
+static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
+ struct bpos start, struct bpos *end)
+{
+ struct btree_iter alloc_iter;
+ struct btree_iter bp_iter;
+ struct bkey_s_c alloc_k, bp_k;
+ size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
+ bool alloc_end = false, bp_end = false;
+ int ret = 0;
+
+ bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
+ start, 0, 1, 0);
+ bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
+ bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
+ while (1) {
+ alloc_k = !alloc_end
+ ? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
+ : bkey_s_c_null;
+ bp_k = !bp_end
+ ? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
+ : bkey_s_c_null;
+
+ ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
+ if ((!alloc_k.k && !bp_k.k) || ret) {
+ *end = SPOS_MAX;
+ break;
+ }
+
+ --btree_nodes;
+ if (!btree_nodes) {
+ *end = alloc_k.k ? alloc_k.k->p : SPOS_MAX;
+ break;
+ }
+
+ if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
+ bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
+ if (!bch2_btree_iter_advance(&alloc_iter))
+ alloc_end = true;
+ } else {
+ if (!bch2_btree_iter_advance(&bp_iter))
+ bp_end = true;
+ }
+ }
+ bch2_trans_iter_exit(trans, &bp_iter);
+ bch2_trans_iter_exit(trans, &alloc_iter);
+ return ret;
+}
+
+int bch2_check_extents_to_backpointers(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bpos start = POS_MIN, end;
+ int ret;
+
+ while (1) {
+ ret = bch2_get_alloc_in_memory_pos(trans, start, &end);
+ if (ret)
+ break;
+
+ if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
+ bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
+ __func__, btree_nodes_fit_in_ram(c));
+
+ if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "check_extents_to_backpointers(): ");
+ bch2_bpos_to_text(&buf, start);
+ prt_str(&buf, "-");
+ bch2_bpos_to_text(&buf, end);
+
+ bch_verbose(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ ret = bch2_check_extents_to_backpointers_pass(trans, start, end);
+ if (ret || bpos_eq(end, SPOS_MAX))
+ break;
+
+ start = bpos_successor(end);
+ }
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int check_one_backpointer(struct btree_trans *trans,
+ struct bbpos start,
+ struct bbpos end,
+ struct bkey_s_c_backpointer bp,
+ struct bpos *last_flushed_pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bbpos pos = bp_to_bbpos(*bp.v);
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ if (bbpos_cmp(pos, start) < 0 ||
+ bbpos_cmp(pos, end) > 0)
+ return 0;
+
+ k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
+ ret = bkey_err(k);
+ if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
+ return 0;
+ if (ret)
+ return ret;
+
+ if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
+ *last_flushed_pos = bp.k->p;
+ ret = bch2_btree_write_buffer_flush_sync(trans) ?:
+ -BCH_ERR_transaction_restart_write_buffer_flush;
+ goto out;
+ }
+
+ if (fsck_err_on(!k.k, c,
+ "backpointer for missing extent\n %s",
+ (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
+ ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+ goto out;
+ }
+out:
+fsck_err:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
+ struct bbpos start,
+ struct bbpos end)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bpos last_flushed_pos = SPOS_MAX;
+
+ return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
+ POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_one_backpointer(trans, start, end,
+ bkey_s_c_to_backpointer(k),
+ &last_flushed_pos));
+}
+
+int bch2_check_backpointers_to_extents(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
+ int ret;
+
+ while (1) {
+ ret = bch2_get_btree_in_memory_pos(trans,
+ (1U << BTREE_ID_extents)|
+ (1U << BTREE_ID_reflink),
+ ~0,
+ start, &end);
+ if (ret)
+ break;
+
+ if (!bbpos_cmp(start, BBPOS_MIN) &&
+ bbpos_cmp(end, BBPOS_MAX))
+ bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
+ __func__, btree_nodes_fit_in_ram(c));
+
+ if (bbpos_cmp(start, BBPOS_MIN) ||
+ bbpos_cmp(end, BBPOS_MAX)) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "check_backpointers_to_extents(): ");
+ bch2_bbpos_to_text(&buf, start);
+ prt_str(&buf, "-");
+ bch2_bbpos_to_text(&buf, end);
+
+ bch_verbose(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
+ if (ret || !bbpos_cmp(end, BBPOS_MAX))
+ break;
+
+ start = bbpos_successor(end);
+ }
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
new file mode 100644
index 000000000000..547e0617602a
--- /dev/null
+++ b/fs/bcachefs/backpointers.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BACKPOINTERS_BACKGROUND_H
+#define _BCACHEFS_BACKPOINTERS_BACKGROUND_H
+
+#include "btree_iter.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "super.h"
+
+int bch2_backpointer_invalid(const struct bch_fs *, struct bkey_s_c k,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
+void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+void bch2_backpointer_swab(struct bkey_s);
+
+#define bch2_bkey_ops_backpointer ((struct bkey_ops) { \
+ .key_invalid = bch2_backpointer_invalid, \
+ .val_to_text = bch2_backpointer_k_to_text, \
+ .swab = bch2_backpointer_swab, \
+ .min_val_size = 32, \
+})
+
+#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
+
+/*
+ * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
+ * btree:
+ */
+static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
+ struct bpos bp_pos)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
+ u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
+
+ return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
+}
+
+/*
+ * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
+ */
+static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
+ struct bpos bucket,
+ u64 bucket_offset)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+ struct bpos ret;
+
+ ret = POS(bucket.inode,
+ (bucket_to_sector(ca, bucket.offset) <<
+ MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
+
+ EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
+
+ return ret;
+}
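+
+/*
+ * Worked example, assuming a (hypothetical) 128-sector bucket size:
+ * bucket 10 on device 0 starts at sector 1280, so bucket_pos_to_bp()
+ * maps POS(0, 10) to POS(0, (1280 << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
+ * bucket_offset) == POS(0, 1310720 + bucket_offset), and
+ * bp_pos_to_bucket() undoes the shift to recover sector 1280 and thus
+ * bucket 10. The shift reserves 2^10 distinct offsets per bucket
+ * sector, enough to give every pointer a unique backpointer position
+ * even for maximally compressed extents.
+ */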
+
+int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bkey_i_backpointer *,
+ struct bch_backpointer, struct bkey_s_c, bool);
+
+static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
+ struct bpos bucket,
+ struct bch_backpointer bp,
+ struct bkey_s_c orig_k,
+ bool insert)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i_backpointer *bp_k;
+ int ret;
+
+ bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
+ ret = PTR_ERR_OR_ZERO(bp_k);
+ if (ret)
+ return ret;
+
+ bkey_backpointer_init(&bp_k->k_i);
+ bp_k->k.p = bucket_pos_to_bp(c, bucket, bp.bucket_offset);
+ bp_k->v = bp;
+
+ if (!insert) {
+ bp_k->k.type = KEY_TYPE_deleted;
+ set_bkey_val_u64s(&bp_k->k, 0);
+ }
+
+ if (unlikely(bch2_backpointers_no_use_write_buffer))
+ return bch2_bucket_backpointer_mod_nowritebuffer(trans, bp_k, bp, orig_k, insert);
+
+ return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i);
+}
+
+static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, struct extent_ptr_decoded p)
+{
+ return level ? BCH_DATA_btree :
+ p.has_ec ? BCH_DATA_stripe :
+ BCH_DATA_user;
+}
+
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, struct extent_ptr_decoded p,
+ struct bpos *bucket_pos, struct bch_backpointer *bp)
+{
+ enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
+ s64 sectors = level ? btree_sectors(c) : k.k->size;
+ u32 bucket_offset;
+
+ *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
+ *bp = (struct bch_backpointer) {
+ .btree_id = btree_id,
+ .level = level,
+ .data_type = data_type,
+ .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
+ p.crc.offset,
+ .bucket_len = ptr_disk_sectors(sectors, p),
+ .pos = k.k->p,
+ };
+}
+
+int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
+ struct bpos *, struct bch_backpointer *, unsigned);
+struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
+ struct bpos, struct bch_backpointer,
+ unsigned);
+struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *,
+ struct bpos, struct bch_backpointer);
+
+int bch2_check_btree_backpointers(struct bch_fs *);
+int bch2_check_extents_to_backpointers(struct bch_fs *);
+int bch2_check_backpointers_to_extents(struct bch_fs *);
+
+#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */
diff --git a/fs/bcachefs/bbpos.h b/fs/bcachefs/bbpos.h
new file mode 100644
index 000000000000..1fbed1f8378d
--- /dev/null
+++ b/fs/bcachefs/bbpos.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BBPOS_H
+#define _BCACHEFS_BBPOS_H
+
+#include "bkey_methods.h"
+
+struct bbpos {
+ enum btree_id btree;
+ struct bpos pos;
+};
+
+static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)
+{
+ return (struct bbpos) { btree, pos };
+}
+
+#define BBPOS_MIN BBPOS(0, POS_MIN)
+#define BBPOS_MAX BBPOS(BTREE_ID_NR - 1, POS_MAX)
+
+static inline int bbpos_cmp(struct bbpos l, struct bbpos r)
+{
+ return cmp_int(l.btree, r.btree) ?: bpos_cmp(l.pos, r.pos);
+}
+
+static inline struct bbpos bbpos_successor(struct bbpos pos)
+{
+ if (bpos_cmp(pos.pos, SPOS_MAX)) {
+ pos.pos = bpos_successor(pos.pos);
+ return pos;
+ }
+
+ if (pos.btree != BTREE_ID_NR) {
+ pos.btree++;
+ pos.pos = POS_MIN;
+ return pos;
+ }
+
+ BUG();
+}
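+
+/*
+ * e.g. the successor of BBPOS(BTREE_ID_extents, SPOS_MAX) is
+ * BBPOS(BTREE_ID_extents + 1, POS_MIN): iteration exhausts one btree
+ * before moving to the start of the next.
+ */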
+
+static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos)
+{
+ prt_str(out, bch2_btree_ids[pos.btree]);
+ prt_char(out, ':');
+ bch2_bpos_to_text(out, pos.pos);
+}
+
+#endif /* _BCACHEFS_BBPOS_H */
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
new file mode 100644
index 000000000000..53ffa88cae16
--- /dev/null
+++ b/fs/bcachefs/bcachefs.h
@@ -0,0 +1,1156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_H
+#define _BCACHEFS_H
+
+/*
+ * SOME HIGH LEVEL CODE DOCUMENTATION:
+ *
+ * Bcache mostly works with cache sets, cache devices, and backing devices.
+ *
+ * Support for multiple cache devices hasn't quite been finished off yet, but
+ * it's about 95% plumbed through. A cache set and its cache devices are sort of
+ * like an md raid array and its component devices. Most of the code doesn't care
+ * about individual cache devices; the main abstraction is the cache set.
+ *
+ * Multiple cache devices is intended to give us the ability to mirror dirty
+ * cached data and metadata, without mirroring clean cached data.
+ *
+ * Backing devices are different, in that they have a lifetime independent of a
+ * cache set. When you register a newly formatted backing device it'll come up
+ * in passthrough mode, and then you can attach and detach a backing device from
+ * a cache set at runtime - while it's mounted and in use. Detaching implicitly
+ * invalidates any cached data for that backing device.
+ *
+ * A cache set can have multiple (many) backing devices attached to it.
+ *
+ * There's also flash only volumes - this is the reason for the distinction
+ * between struct cached_dev and struct bcache_device. A flash only volume
+ * works much like a bcache device that has a backing device, except the
+ * "cached" data is always dirty. The end result is that we get thin
+ * provisioning with very little additional code.
+ *
+ * Flash only volumes work but they're not production ready because the moving
+ * garbage collector needs more work. More on that later.
+ *
+ * BUCKETS/ALLOCATION:
+ *
+ * Bcache is primarily designed for caching, which means that in normal
+ * operation all of our available space will be allocated. Thus, we need an
+ * efficient way of deleting things from the cache so we can write new things to
+ * it.
+ *
+ * To do this, we first divide the cache device up into buckets. A bucket is the
+ * unit of allocation; they're typically around 1MB - anywhere from 128kB to 2MB+
+ * works efficiently.
+ *
+ * Each bucket has a 16 bit priority, and an 8 bit generation associated with
+ * it. The gens and priorities for all the buckets are stored contiguously and
+ * packed on disk (in a linked list of buckets - aside from the superblock, all
+ * of bcache's metadata is stored in buckets).
+ *
+ * The priority is used to implement an LRU. We reset a bucket's priority when
+ * we allocate it or on a cache hit, and every so often we decrement the priority
+ * of each bucket. It could be used to implement something more sophisticated,
+ * if anyone ever gets around to it.
+ *
+ * The generation is used for invalidating buckets. Each pointer also has an 8
+ * bit generation embedded in it; for a pointer to be considered valid, its gen
+ * must match the gen of the bucket it points into. Thus, to reuse a bucket all
+ * we have to do is increment its gen (and write its new gen to disk; we batch
+ * this up).
+ *
+ * Bcache is entirely COW - we never write twice to a bucket, even buckets that
+ * contain metadata (including btree nodes).
+ *
+ * THE BTREE:
+ *
+ * Bcache is in large part designed around the btree.
+ *
+ * At a high level, the btree is just an index of key -> ptr tuples.
+ *
+ * Keys represent extents, and thus have a size field. Keys also have a variable
+ * number of pointers attached to them (potentially zero, which is handy for
+ * invalidating the cache).
+ *
+ * The key itself is an inode:offset pair. The inode number corresponds to a
+ * backing device or a flash only volume. The offset is the ending offset of the
+ * extent within the inode - not the starting offset; this makes lookups
+ * slightly more convenient.
+ *
+ * Pointers contain the cache device id, the offset on that device, and an 8 bit
+ * generation number. More on the gen later.
+ *
+ * Index lookups are not fully abstracted - cache lookups in particular are
+ * still somewhat mixed in with the btree code, but things are headed in that
+ * direction.
+ *
+ * Updates are fairly well abstracted, though. There are two different ways of
+ * updating the btree; insert and replace.
+ *
+ * BTREE_INSERT will just take a list of keys and insert them into the btree -
+ * overwriting (possibly only partially) any extents they overlap with. This is
+ * used to update the index after a write.
+ *
+ * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
+ * overwriting a key that matches another given key. This is used for inserting
+ * data into the cache after a cache miss, and for background writeback, and for
+ * the moving garbage collector.
+ *
+ * There is no "delete" operation; deleting things from the index is
+ * accomplished either by invalidating pointers (by incrementing a bucket's
+ * gen) or by inserting a key with 0 pointers - which will overwrite anything
+ * previously present at that location in the index.
+ *
+ * This means that there are always stale/invalid keys in the btree. They're
+ * filtered out by the code that iterates through a btree node, and removed when
+ * a btree node is rewritten.
+ *
+ * BTREE NODES:
+ *
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
+ * free smaller than a bucket - so, that's how big our btree nodes are.
+ *
+ * (If buckets are really big we'll only use part of the bucket for a btree node
+ * - no less than 1/4th - but a bucket still contains no more than a single
+ * btree node. I'd actually like to change this, but for now we rely on the
+ * bucket's gen for deleting btree nodes when we rewrite/split a node.)
+ *
+ * Anyway, btree nodes are big - big enough to be inefficient with a textbook
+ * btree implementation.
+ *
+ * The way this is solved is that btree nodes are internally log structured; we
+ * can append new keys to an existing btree node without rewriting it. This
+ * means each set of keys we write is sorted, but the node is not.
+ *
+ * We maintain this log structure in memory - keeping 1MB of keys sorted would
+ * be expensive, and we have to distinguish between the keys we have written and
+ * the keys we haven't. So to do a lookup in a btree node, we have to search
+ * each sorted set. But we do merge written sets together lazily, so the cost of
+ * these extra searches is quite low (normally most of the keys in a btree node
+ * will be in one big set, and then there'll be one or two sets that are much
+ * smaller).
+ *
+ * This log structure makes bcache's btree more of a hybrid between a
+ * conventional btree and a compacting data structure, with some of the
+ * advantages of both.
+ *
+ * GARBAGE COLLECTION:
+ *
+ * We can't just invalidate any bucket - it might contain dirty data or
+ * metadata. If it once contained dirty data, other writes might overwrite it
+ * later, leaving no valid pointers into that bucket in the index.
+ *
+ * Thus, the primary purpose of garbage collection is to find buckets to reuse.
+ * It also counts how much valid data each bucket currently contains, so that
+ * allocation can reuse buckets sooner when they've been mostly overwritten.
+ *
+ * It also does some things that are really internal to the btree
+ * implementation. If a btree node contains pointers that are stale by more than
+ * some threshold, it rewrites the btree node to avoid the bucket's generation
+ * wrapping around. It also merges adjacent btree nodes if they're empty enough.
+ *
+ * THE JOURNAL:
+ *
+ * Bcache's journal is not necessary for consistency; we always strictly
+ * order metadata writes so that the btree and everything else is consistent on
+ * disk in the event of an unclean shutdown, and in fact bcache had writeback
+ * caching (with recovery from unclean shutdown) before journalling was
+ * implemented.
+ *
+ * Rather, the journal is purely a performance optimization; we can't complete a
+ * write until we've updated the index on disk, otherwise the cache would be
+ * inconsistent in the event of an unclean shutdown. This means that without the
+ * journal, on random write workloads we constantly have to update all the leaf
+ * nodes in the btree, and those writes will be mostly empty (appending at most
+ * a few keys each) - highly inefficient in terms of amount of metadata writes,
+ * and it puts more strain on the various btree resorting/compacting code.
+ *
+ * The journal is just a log of keys we've inserted; on startup we just reinsert
+ * all the keys in the open journal entries. That means that when we're updating
+ * a node in the btree, we can wait until a 4k block of keys fills up before
+ * writing them out.
+ *
+ * For simplicity, we only journal updates to leaf nodes; updates to parent
+ * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
+ * the complexity to deal with journalling them (in particular, journal replay)
+ * - updates to non leaf nodes just happen synchronously (see btree_split()).
+ */
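+
+/*
+ * Example of the generation check, with made up numbers: a pointer
+ * created with gen 3 into bucket 42 stays valid only while bucket 42's
+ * gen is also 3; bumping the bucket's gen to 4 invalidates every such
+ * pointer at once, without touching the index.
+ */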
+
+#undef pr_fmt
+#ifdef __KERNEL__
+#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
+#else
+#define pr_fmt(fmt) "%s() " fmt "\n", __func__
+#endif
+
+#include <linux/backing-dev-defs.h>
+#include <linux/bug.h>
+#include <linux/bio.h>
+#include <linux/closure.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/percpu-refcount.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/rhashtable.h>
+#include <linux/rwsem.h>
+#include <linux/semaphore.h>
+#include <linux/seqlock.h>
+#include <linux/shrinker.h>
+#include <linux/srcu.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/zstd.h>
+
+#include "bcachefs_format.h"
+#include "errcode.h"
+#include "fifo.h"
+#include "nocow_locking_types.h"
+#include "opts.h"
+#include "recovery_types.h"
+#include "seqmutex.h"
+#include "util.h"
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+#define BCH_WRITE_REF_DEBUG
+#endif
+
+#ifndef dynamic_fault
+#define dynamic_fault(...) 0
+#endif
+
+#define race_fault(...) dynamic_fault("bcachefs:race")
+
+#define trace_and_count(_c, _name, ...) \
+do { \
+ this_cpu_inc((_c)->counters[BCH_COUNTER_##_name]); \
+ trace_##_name(__VA_ARGS__); \
+} while (0)
+
+#define bch2_fs_init_fault(name) \
+ dynamic_fault("bcachefs:bch_fs_init:" name)
+#define bch2_meta_read_fault(name) \
+ dynamic_fault("bcachefs:meta:read:" name)
+#define bch2_meta_write_fault(name) \
+ dynamic_fault("bcachefs:meta:write:" name)
+
+#ifdef __KERNEL__
+#define BCACHEFS_LOG_PREFIX
+#endif
+
+#ifdef BCACHEFS_LOG_PREFIX
+
+#define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
+#define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
+#define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
+#define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
+#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
+ "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
+
+#else
+
+#define bch2_log_msg(_c, fmt) fmt
+#define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
+#define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
+#define bch2_fmt_inum(_c, _inum, fmt) "inum %llu: " fmt "\n", (_inum)
+#define bch2_fmt_inum_offset(_c, _inum, _offset, fmt) \
+ "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
+
+#endif
+
+#define bch2_fmt(_c, fmt) bch2_log_msg(_c, fmt "\n")
+
+#define bch_info(c, fmt, ...) \
+ printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_notice(c, fmt, ...) \
+ printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_warn(c, fmt, ...) \
+ printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_warn_ratelimited(c, fmt, ...) \
+ printk_ratelimited(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
+
+#define bch_err(c, fmt, ...) \
+ printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_dev(ca, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
+#define bch_err_dev_offset(ca, _offset, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
+#define bch_err_inum(c, _inum, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+#define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
+ printk(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
+
+#define bch_err_ratelimited(c, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
+#define bch_err_dev_ratelimited(ca, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
+#define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
+#define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
+#define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
+ printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
+
+#define bch_err_fn(_c, _ret) \
+do { \
+ if (_ret && !bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+ bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
+} while (0)
+
+#define bch_err_msg(_c, _ret, _msg, ...) \
+do { \
+ if (_ret && !bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+ bch_err(_c, "%s(): error " _msg " %s", __func__, \
+ ##__VA_ARGS__, bch2_err_str(_ret)); \
+} while (0)
+
+#define bch_verbose(c, fmt, ...) \
+do { \
+ if ((c)->opts.verbose) \
+ bch_info(c, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_verbose_init(opts, fmt, ...) \
+do { \
+ if (opt_get(opts, verbose)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+/* Parameters that are useful for debugging, but should always be compiled in: */
+#define BCH_DEBUG_PARAMS_ALWAYS() \
+ BCH_DEBUG_PARAM(key_merging_disabled, \
+ "Disables merging of extents") \
+ BCH_DEBUG_PARAM(btree_gc_always_rewrite, \
+ "Causes mark and sweep to compact and rewrite every " \
+ "btree node it traverses") \
+ BCH_DEBUG_PARAM(btree_gc_rewrite_disabled, \
+ "Disables rewriting of btree nodes during mark and sweep")\
+ BCH_DEBUG_PARAM(btree_shrinker_disabled, \
+ "Disables the shrinker callback for the btree node cache")\
+ BCH_DEBUG_PARAM(verify_btree_ondisk, \
+ "Reread btree nodes at various points to verify the " \
+ "mergesort in the read path against modifications " \
+ "done in memory") \
+ BCH_DEBUG_PARAM(verify_all_btree_replicas, \
+ "When reading btree nodes, read all replicas and " \
+ "compare them") \
+ BCH_DEBUG_PARAM(backpointers_no_use_write_buffer, \
+ "Don't use the write buffer for backpointers, enabling "\
+ "extra runtime checks")
+
+/* Parameters that should only be compiled in debug mode: */
+#define BCH_DEBUG_PARAMS_DEBUG() \
+ BCH_DEBUG_PARAM(expensive_debug_checks, \
+ "Enables various runtime debugging checks that " \
+ "significantly affect performance") \
+ BCH_DEBUG_PARAM(debug_check_iterators, \
+ "Enables extra verification for btree iterators") \
+ BCH_DEBUG_PARAM(debug_check_btree_accounting, \
+ "Verify btree accounting for keys within a node") \
+ BCH_DEBUG_PARAM(journal_seq_verify, \
+ "Store the journal sequence number in the version " \
+ "number of every btree key, and verify that btree " \
+ "update ordering is preserved during recovery") \
+ BCH_DEBUG_PARAM(inject_invalid_keys, \
+ "Store the journal sequence number in the version " \
+ "number of every btree key, and verify that btree " \
+ "update ordering is preserved during recovery") \
+ BCH_DEBUG_PARAM(test_alloc_startup, \
+ "Force allocator startup to use the slowpath where it" \
+ "can't find enough free buckets without invalidating" \
+ "cached data") \
+ BCH_DEBUG_PARAM(force_reconstruct_read, \
+ "Force reads to use the reconstruct path, when reading" \
+ "from erasure coded extents") \
+ BCH_DEBUG_PARAM(test_restart_gc, \
+ "Test restarting mark and sweep gc when bucket gens change")
+
+#define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
+#else
+#define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
+#endif
+
+#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
+BCH_DEBUG_PARAMS()
+#undef BCH_DEBUG_PARAM
+
+#ifndef CONFIG_BCACHEFS_DEBUG
+#define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
+BCH_DEBUG_PARAMS_DEBUG()
+#undef BCH_DEBUG_PARAM
+#endif
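+
+/*
+ * The above expands each BCH_DEBUG_PARAM() to 'extern bool bch2_<name>'
+ * when it's compiled in, and the debug-only parameters - e.g.
+ * bch2_expensive_debug_checks - to a const-false dummy otherwise, so
+ * call sites can test them unconditionally and the compiler can elide
+ * the dead branches.
+ */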
+
+#define BCH_TIME_STATS() \
+ x(btree_node_mem_alloc) \
+ x(btree_node_split) \
+ x(btree_node_compact) \
+ x(btree_node_merge) \
+ x(btree_node_sort) \
+ x(btree_node_read) \
+ x(btree_interior_update_foreground) \
+ x(btree_interior_update_total) \
+ x(btree_gc) \
+ x(data_write) \
+ x(data_read) \
+ x(data_promote) \
+ x(journal_flush_write) \
+ x(journal_noflush_write) \
+ x(journal_flush_seq) \
+ x(blocked_journal) \
+ x(blocked_allocate) \
+ x(blocked_allocate_open_bucket) \
+ x(nocow_lock_contended)
+
+enum bch_time_stats {
+#define x(name) BCH_TIME_##name,
+ BCH_TIME_STATS()
+#undef x
+ BCH_TIME_STAT_NR
+};
+
+#include "alloc_types.h"
+#include "btree_types.h"
+#include "btree_write_buffer_types.h"
+#include "buckets_types.h"
+#include "buckets_waiting_for_journal_types.h"
+#include "clock_types.h"
+#include "ec_types.h"
+#include "journal_types.h"
+#include "keylist_types.h"
+#include "quota_types.h"
+#include "rebalance_types.h"
+#include "replicas_types.h"
+#include "subvolume_types.h"
+#include "super_types.h"
+
+/* Number of nodes btree coalesce will try to coalesce at once */
+#define GC_MERGE_NODES 4U
+
+/* Maximum number of nodes we might need to allocate atomically: */
+#define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
+
+/* Size of the freelist we allocate btree nodes from: */
+#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
+
+#define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
+
+struct btree;
+
+enum gc_phase {
+ GC_PHASE_NOT_RUNNING,
+ GC_PHASE_START,
+ GC_PHASE_SB,
+
+ GC_PHASE_BTREE_stripes,
+ GC_PHASE_BTREE_extents,
+ GC_PHASE_BTREE_inodes,
+ GC_PHASE_BTREE_dirents,
+ GC_PHASE_BTREE_xattrs,
+ GC_PHASE_BTREE_alloc,
+ GC_PHASE_BTREE_quotas,
+ GC_PHASE_BTREE_reflink,
+ GC_PHASE_BTREE_subvolumes,
+ GC_PHASE_BTREE_snapshots,
+ GC_PHASE_BTREE_lru,
+ GC_PHASE_BTREE_freespace,
+ GC_PHASE_BTREE_need_discard,
+ GC_PHASE_BTREE_backpointers,
+ GC_PHASE_BTREE_bucket_gens,
+ GC_PHASE_BTREE_snapshot_trees,
+ GC_PHASE_BTREE_deleted_inodes,
+ GC_PHASE_BTREE_logged_ops,
+
+ GC_PHASE_PENDING_DELETE,
+};
+
+struct gc_pos {
+ enum gc_phase phase;
+ struct bpos pos;
+ unsigned level;
+};
+
+struct reflink_gc {
+ u64 offset;
+ u32 size;
+ u32 refcount;
+};
+
+typedef GENRADIX(struct reflink_gc) reflink_gc_table;
+
+struct io_count {
+ u64 sectors[2][BCH_DATA_NR];
+};
+
+struct bch_dev {
+ struct kobject kobj;
+ struct percpu_ref ref;
+ struct completion ref_completion;
+ struct percpu_ref io_ref;
+ struct completion io_ref_completion;
+
+ struct bch_fs *fs;
+
+ u8 dev_idx;
+ /*
+ * Cached version of this device's member info from superblock
+ * Committed by bch2_write_super() -> bch_fs_mi_update()
+ */
+ struct bch_member_cpu mi;
+ __uuid_t uuid;
+ char name[BDEVNAME_SIZE];
+
+ struct bch_sb_handle disk_sb;
+ struct bch_sb *sb_read_scratch;
+ int sb_write_error;
+ dev_t dev;
+ atomic_t flush_seq;
+
+ struct bch_devs_mask self;
+
+ /* biosets used in cloned bios for writing multiple replicas */
+ struct bio_set replica_set;
+
+ /*
+ * Buckets:
+ * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
+ * gc_lock (for device resize); holding any one of them is sufficient
+ * for access - or rcu_read_lock(), but only for ptr_stale():
+ */
+ struct bucket_array __rcu *buckets_gc;
+ struct bucket_gens __rcu *bucket_gens;
+ u8 *oldest_gen;
+ unsigned long *buckets_nouse;
+ struct rw_semaphore bucket_lock;
+
+ struct bch_dev_usage *usage_base;
+ struct bch_dev_usage __percpu *usage[JOURNAL_BUF_NR];
+ struct bch_dev_usage __percpu *usage_gc;
+
+ /* Allocator: */
+ u64 new_fs_bucket_idx;
+ u64 alloc_cursor;
+
+ unsigned nr_open_buckets;
+ unsigned nr_btree_reserve;
+
+ size_t inc_gen_needs_gc;
+ size_t inc_gen_really_needs_gc;
+ size_t buckets_waiting_on_journal;
+
+ atomic64_t rebalance_work;
+
+ struct journal_device journal;
+ u64 prev_journal_sector;
+
+ struct work_struct io_error_work;
+
+ /* The rest of this all shows up in sysfs */
+ atomic64_t cur_latency[2];
+ struct bch2_time_stats io_latency[2];
+
+#define CONGESTED_MAX 1024
+ atomic_t congested;
+ u64 congested_last;
+
+ struct io_count __percpu *io_done;
+};
+
+enum {
+ /* startup: */
+ BCH_FS_STARTED,
+ BCH_FS_MAY_GO_RW,
+ BCH_FS_RW,
+ BCH_FS_WAS_RW,
+
+ /* shutdown: */
+ BCH_FS_STOPPING,
+ BCH_FS_EMERGENCY_RO,
+ BCH_FS_GOING_RO,
+ BCH_FS_WRITE_DISABLE_COMPLETE,
+ BCH_FS_CLEAN_SHUTDOWN,
+
+ /* fsck passes: */
+ BCH_FS_FSCK_DONE,
+ BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */
+ BCH_FS_NEED_ANOTHER_GC,
+
+ BCH_FS_HAVE_DELETED_SNAPSHOTS,
+
+ /* errors: */
+ BCH_FS_ERROR,
+ BCH_FS_TOPOLOGY_ERROR,
+ BCH_FS_ERRORS_FIXED,
+ BCH_FS_ERRORS_NOT_FIXED,
+};
+
+struct btree_debug {
+ unsigned id;
+};
+
+#define BCH_TRANSACTIONS_NR 128
+
+struct btree_transaction_stats {
+ struct bch2_time_stats lock_hold_times;
+ struct mutex lock;
+ unsigned nr_max_paths;
+ unsigned wb_updates_size;
+ unsigned max_mem;
+ char *max_paths_text;
+};
+
+struct bch_fs_pcpu {
+ u64 sectors_available;
+};
+
+struct journal_seq_blacklist_table {
+ size_t nr;
+ struct journal_seq_blacklist_table_entry {
+ u64 start;
+ u64 end;
+ bool dirty;
+ } entries[];
+};
+
+struct journal_keys {
+ struct journal_key {
+ u64 journal_seq;
+ u32 journal_offset;
+ enum btree_id btree_id:8;
+ unsigned level:8;
+ bool allocated;
+ bool overwritten;
+ struct bkey_i *k;
+ } *d;
+ /*
+ * Gap buffer: instead of all the empty space in the array being at the
+ * end of the buffer - from @nr to @size - the empty space is at @gap.
+ * This means that sequential insertions are O(n) instead of O(n^2).
+ */
+ size_t gap;
+ size_t nr;
+ size_t size;
+};
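+
+/*
+ * Gap buffer example, with made up sizes: with size == 8, nr == 5 and
+ * gap == 2, entries live in d[0..1] and d[5..7]. Inserting at sorted
+ * position 2 just writes d[gap++]; inserting elsewhere first memmove()s
+ * the gap to that position. Journal keys mostly arrive in order, so the
+ * common case never pays the memmove.
+ */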
+
+struct btree_trans_buf {
+ struct btree_trans *trans;
+};
+
+#define REPLICAS_DELTA_LIST_MAX (1U << 16)
+
+#define BCACHEFS_ROOT_SUBVOL_INUM \
+ ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })
+
+#define BCH_WRITE_REFS() \
+ x(trans) \
+ x(write) \
+ x(promote) \
+ x(node_rewrite) \
+ x(stripe_create) \
+ x(stripe_delete) \
+ x(reflink) \
+ x(fallocate) \
+ x(discard) \
+ x(invalidate) \
+ x(delete_dead_snapshots) \
+ x(snapshot_delete_pagecache) \
+ x(sysfs)
+
+enum bch_write_ref {
+#define x(n) BCH_WRITE_REF_##n,
+ BCH_WRITE_REFS()
+#undef x
+ BCH_WRITE_REF_NR,
+};
+
+struct bch_fs {
+ struct closure cl;
+
+ struct list_head list;
+ struct kobject kobj;
+ struct kobject counters_kobj;
+ struct kobject internal;
+ struct kobject opts_dir;
+ struct kobject time_stats;
+ unsigned long flags;
+
+ int minor;
+ struct device *chardev;
+ struct super_block *vfs_sb;
+ dev_t dev;
+ char name[40];
+
+ /* ro/rw, add/remove/resize devices: */
+ struct rw_semaphore state_lock;
+
+ /* Counts outstanding writes, for clean transition to read-only */
+#ifdef BCH_WRITE_REF_DEBUG
+ atomic_long_t writes[BCH_WRITE_REF_NR];
+#else
+ struct percpu_ref writes;
+#endif
+ struct work_struct read_only_work;
+
+ struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];
+
+ struct bch_replicas_cpu replicas;
+ struct bch_replicas_cpu replicas_gc;
+ struct mutex replicas_gc_lock;
+ mempool_t replicas_delta_pool;
+
+ struct journal_entry_res btree_root_journal_res;
+ struct journal_entry_res replicas_journal_res;
+ struct journal_entry_res clock_journal_res;
+ struct journal_entry_res dev_usage_journal_res;
+
+ struct bch_disk_groups_cpu __rcu *disk_groups;
+
+ struct bch_opts opts;
+
+ /* Updated by bch2_sb_update():*/
+ struct {
+ __uuid_t uuid;
+ __uuid_t user_uuid;
+
+ u16 version;
+ u16 version_min;
+ u16 version_upgrade_complete;
+
+ u8 nr_devices;
+ u8 clean;
+
+ u8 encryption_type;
+
+ u64 time_base_lo;
+ u32 time_base_hi;
+ unsigned time_units_per_sec;
+ unsigned nsec_per_time_unit;
+ u64 features;
+ u64 compat;
+ } sb;
+
+
+ struct bch_sb_handle disk_sb;
+
+ unsigned short block_bits; /* ilog2(block_size) */
+
+ u16 btree_foreground_merge_threshold;
+
+ struct closure sb_write;
+ struct mutex sb_lock;
+
+ /* snapshot.c: */
+ struct snapshot_table __rcu *snapshots;
+ size_t snapshot_table_size;
+ struct mutex snapshot_table_lock;
+ struct rw_semaphore snapshot_create_lock;
+
+ struct work_struct snapshot_delete_work;
+ struct work_struct snapshot_wait_for_pagecache_and_delete_work;
+ snapshot_id_list snapshots_unlinked;
+ struct mutex snapshots_unlinked_lock;
+
+ /* BTREE CACHE */
+ struct bio_set btree_bio;
+ struct workqueue_struct *io_complete_wq;
+
+ struct btree_root btree_roots_known[BTREE_ID_NR];
+ DARRAY(struct btree_root) btree_roots_extra;
+ struct mutex btree_root_lock;
+
+ struct btree_cache btree_cache;
+
+ /*
+ * Cache of allocated btree nodes - if we allocate a btree node and
+ * don't use it, if we free it that space can't be reused until going
+ * _all_ the way through the allocator (which exposes us to a livelock
+ * when allocation of btree reserves fails halfway through) - instead, we
+ * can stick them here:
+ */
+ struct btree_alloc btree_reserve_cache[BTREE_NODE_RESERVE * 2];
+ unsigned btree_reserve_cache_nr;
+ struct mutex btree_reserve_cache_lock;
+
+ mempool_t btree_interior_update_pool;
+ struct list_head btree_interior_update_list;
+ struct list_head btree_interior_updates_unwritten;
+ struct mutex btree_interior_update_lock;
+ struct closure_waitlist btree_interior_update_wait;
+
+ struct workqueue_struct *btree_interior_update_worker;
+ struct work_struct btree_interior_update_work;
+
+ struct list_head pending_node_rewrites;
+ struct mutex pending_node_rewrites_lock;
+
+ /* btree_io.c: */
+ spinlock_t btree_write_error_lock;
+ struct btree_write_stats {
+ atomic64_t nr;
+ atomic64_t bytes;
+ } btree_write_stats[BTREE_WRITE_TYPE_NR];
+
+ /* btree_iter.c: */
+ struct seqmutex btree_trans_lock;
+ struct list_head btree_trans_list;
+ mempool_t btree_trans_pool;
+ mempool_t btree_trans_mem_pool;
+ struct btree_trans_buf __percpu *btree_trans_bufs;
+
+ struct srcu_struct btree_trans_barrier;
+ bool btree_trans_barrier_initialized;
+
+ struct btree_key_cache btree_key_cache;
+ unsigned btree_key_cache_btrees;
+
+ struct btree_write_buffer btree_write_buffer;
+
+ struct workqueue_struct *btree_update_wq;
+ struct workqueue_struct *btree_io_complete_wq;
+ /* copygc needs its own workqueue for index updates */
+ struct workqueue_struct *copygc_wq;
+ /*
+ * Use a dedicated wq for write ref holder tasks. Required to avoid
+ * dependency problems with other wq tasks that can block on ref
+ * draining, such as read-only transition.
+ */
+ struct workqueue_struct *write_ref_wq;
+
+ /* ALLOCATION */
+ struct bch_devs_mask rw_devs[BCH_DATA_NR];
+
+ u64 capacity; /* sectors */
+
+ /*
+ * When capacity _decreases_ (due to a disk being removed), we
+ * increment capacity_gen - this invalidates outstanding reservations
+ * and forces them to be revalidated
+ */
+ u32 capacity_gen;
+ unsigned bucket_size_max;
+
+ atomic64_t sectors_available;
+ struct mutex sectors_available_lock;
+
+ struct bch_fs_pcpu __percpu *pcpu;
+
+ struct percpu_rw_semaphore mark_lock;
+
+ seqcount_t usage_lock;
+ struct bch_fs_usage *usage_base;
+ struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
+ struct bch_fs_usage __percpu *usage_gc;
+ u64 __percpu *online_reserved;
+
+ /* single element mempool: */
+ struct mutex usage_scratch_lock;
+ struct bch_fs_usage_online *usage_scratch;
+
+ struct io_clock io_clock[2];
+
+ /* JOURNAL SEQ BLACKLIST */
+ struct journal_seq_blacklist_table *
+ journal_seq_blacklist_table;
+ struct work_struct journal_seq_blacklist_gc_work;
+
+ /* ALLOCATOR */
+ spinlock_t freelist_lock;
+ struct closure_waitlist freelist_wait;
+ u64 blocked_allocate;
+ u64 blocked_allocate_open_bucket;
+
+ open_bucket_idx_t open_buckets_freelist;
+ open_bucket_idx_t open_buckets_nr_free;
+ struct closure_waitlist open_buckets_wait;
+ struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
+ open_bucket_idx_t open_buckets_hash[OPEN_BUCKETS_COUNT];
+
+ open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
+ open_bucket_idx_t open_buckets_partial_nr;
+
+ struct write_point btree_write_point;
+ struct write_point rebalance_write_point;
+
+ struct write_point write_points[WRITE_POINT_MAX];
+ struct hlist_head write_points_hash[WRITE_POINT_HASH_NR];
+ struct mutex write_points_hash_lock;
+ unsigned write_points_nr;
+
+ struct buckets_waiting_for_journal buckets_waiting_for_journal;
+ struct work_struct discard_work;
+ struct work_struct invalidate_work;
+
+ /* GARBAGE COLLECTION */
+ struct task_struct *gc_thread;
+ atomic_t kick_gc;
+ unsigned long gc_count;
+
+ enum btree_id gc_gens_btree;
+ struct bpos gc_gens_pos;
+
+ /*
+ * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
+ * has been marked by GC.
+ *
+ * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
+ *
+ * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
+ * can read without a lock.
+ */
+ seqcount_t gc_pos_lock;
+ struct gc_pos gc_pos;
+
+ /*
+ * The allocation code needs gc_mark in struct bucket to be correct, but
+ * it's not while a gc is in progress.
+ */
+ struct rw_semaphore gc_lock;
+ struct mutex gc_gens_lock;
+
+ /* IO PATH */
+ struct semaphore io_in_flight;
+ struct bio_set bio_read;
+ struct bio_set bio_read_split;
+ struct bio_set bio_write;
+ struct mutex bio_bounce_pages_lock;
+ mempool_t bio_bounce_pages;
+ struct bucket_nocow_lock_table
+ nocow_locks;
+ struct rhashtable promote_table;
+
+ mempool_t compression_bounce[2];
+ mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR];
+ mempool_t decompress_workspace;
+ ZSTD_parameters zstd_params;
+
+ struct crypto_shash *sha256;
+ struct crypto_sync_skcipher *chacha20;
+ struct crypto_shash *poly1305;
+
+ atomic64_t key_version;
+
+ mempool_t large_bkey_pool;
+
+ /* MOVE.C */
+ struct list_head moving_context_list;
+ struct mutex moving_context_lock;
+
+ struct list_head data_progress_list;
+ struct mutex data_progress_lock;
+
+ /* REBALANCE */
+ struct bch_fs_rebalance rebalance;
+
+ /* COPYGC */
+ struct task_struct *copygc_thread;
+ struct write_point copygc_write_point;
+ s64 copygc_wait_at;
+ s64 copygc_wait;
+ bool copygc_running;
+ wait_queue_head_t copygc_running_wq;
+
+ /* STRIPES: */
+ GENRADIX(struct stripe) stripes;
+ GENRADIX(struct gc_stripe) gc_stripes;
+
+ struct hlist_head ec_stripes_new[32];
+ spinlock_t ec_stripes_new_lock;
+
+ ec_stripes_heap ec_stripes_heap;
+ struct mutex ec_stripes_heap_lock;
+
+ /* ERASURE CODING */
+ struct list_head ec_stripe_head_list;
+ struct mutex ec_stripe_head_lock;
+
+ struct list_head ec_stripe_new_list;
+ struct mutex ec_stripe_new_lock;
+ wait_queue_head_t ec_stripe_new_wait;
+
+ struct work_struct ec_stripe_create_work;
+ u64 ec_stripe_hint;
+
+ struct work_struct ec_stripe_delete_work;
+
+ struct bio_set ec_bioset;
+
+ /* REFLINK */
+ reflink_gc_table reflink_gc_table;
+ size_t reflink_gc_nr;
+
+ /* fs.c */
+ struct list_head vfs_inodes_list;
+ struct mutex vfs_inodes_lock;
+
+ /* VFS IO PATH - fs-io.c */
+ struct bio_set writepage_bioset;
+ struct bio_set dio_write_bioset;
+ struct bio_set dio_read_bioset;
+ struct bio_set nocow_flush_bioset;
+
+ /* ERRORS */
+ struct list_head fsck_errors;
+ struct mutex fsck_error_lock;
+ bool fsck_alloc_err;
+
+ /* QUOTAS */
+ struct bch_memquota_type quotas[QTYP_NR];
+
+ /* RECOVERY */
+ u64 journal_replay_seq_start;
+ u64 journal_replay_seq_end;
+ enum bch_recovery_pass curr_recovery_pass;
+ /* bitmap of explicitly enabled recovery passes: */
+ u64 recovery_passes_explicit;
+ u64 recovery_passes_complete;
+
+ /* DEBUG JUNK */
+ struct dentry *fs_debug_dir;
+ struct dentry *btree_debug_dir;
+ struct btree_debug btree_debug[BTREE_ID_NR];
+ struct btree *verify_data;
+ struct btree_node *verify_ondisk;
+ struct mutex verify_lock;
+
+ u64 *unused_inode_hints;
+ unsigned inode_shard_bits;
+
+ /*
+ * A btree node on disk could have too many bsets for an iterator to fit
+ * on the stack - have to dynamically allocate them
+ */
+ mempool_t fill_iter;
+
+ mempool_t btree_bounce_pool;
+
+ struct journal journal;
+ GENRADIX(struct journal_replay *) journal_entries;
+ u64 journal_entries_base_seq;
+ struct journal_keys journal_keys;
+ struct list_head journal_iters;
+
+ u64 last_bucket_seq_cleanup;
+
+ u64 counters_on_mount[BCH_COUNTER_NR];
+ u64 __percpu *counters;
+
+ unsigned btree_gc_periodic:1;
+ unsigned copy_gc_enabled:1;
+ bool promote_whole_extents;
+
+ struct bch2_time_stats times[BCH_TIME_STAT_NR];
+
+ struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
+};
+
+extern struct wait_queue_head bch2_read_only_wait;
+
+static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
+{
+#ifdef BCH_WRITE_REF_DEBUG
+ atomic_long_inc(&c->writes[ref]);
+#else
+ percpu_ref_get(&c->writes);
+#endif
+}
+
+static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
+{
+#ifdef BCH_WRITE_REF_DEBUG
+ return !test_bit(BCH_FS_GOING_RO, &c->flags) &&
+ atomic_long_inc_not_zero(&c->writes[ref]);
+#else
+ return percpu_ref_tryget_live(&c->writes);
+#endif
+}
+
+static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
+{
+#ifdef BCH_WRITE_REF_DEBUG
+ long v = atomic_long_dec_return(&c->writes[ref]);
+
+ BUG_ON(v < 0);
+ if (v)
+ return;
+ for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
+ if (atomic_long_read(&c->writes[i]))
+ return;
+
+ set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ wake_up(&bch2_read_only_wait);
+#else
+ percpu_ref_put(&c->writes);
+#endif
+}
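+
+/*
+ * Usage sketch (do_discard_work() is a hypothetical stand-in): write paths
+ * take a ref before starting work and drop it when done, so the read-only
+ * transition can wait for outstanding writes to drain:
+ *
+ *	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
+ *		return;
+ *	do_discard_work(c);
+ *	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
+ *
+ * tryget failing means the filesystem is going read-only and new work must
+ * not be started.
+ */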
+
+static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
+{
+#ifndef NO_BCACHEFS_FS
+ if (c->vfs_sb)
+ c->vfs_sb->s_bdi->ra_pages = ra_pages;
+#endif
+}
+
+static inline unsigned bucket_bytes(const struct bch_dev *ca)
+{
+ return ca->mi.bucket_size << 9;
+}
+
+static inline unsigned block_bytes(const struct bch_fs *c)
+{
+ return c->opts.block_size;
+}
+
+static inline unsigned block_sectors(const struct bch_fs *c)
+{
+ return c->opts.block_size >> 9;
+}
+
+static inline size_t btree_sectors(const struct bch_fs *c)
+{
+ return c->opts.btree_node_size >> 9;
+}
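+
+/*
+ * The << 9 / >> 9 shifts above convert between bytes and 512 byte sectors:
+ * e.g. a 256 sector bucket is 256 << 9 == 128 KiB.
+ */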
+
+static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
+{
+ return c->btree_key_cache_btrees & (1U << btree);
+}
+
+static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
+{
+ struct timespec64 t;
+ s32 rem;
+
+ time += c->sb.time_base_lo;
+
+ t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
+ t.tv_nsec = rem * c->sb.nsec_per_time_unit;
+ return t;
+}
+
+static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
+{
+ return (ts.tv_sec * c->sb.time_units_per_sec +
+ (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
+}
+
+static inline s64 bch2_current_time(const struct bch_fs *c)
+{
+ struct timespec64 now;
+
+ ktime_get_coarse_real_ts64(&now);
+ return timespec_to_bch2_time(c, now);
+}
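+
+/*
+ * Worked example: with time_units_per_sec == NSEC_PER_SEC,
+ * nsec_per_time_unit == 1 and time_base_lo == 0, timespec_to_bch2_time()
+ * reduces to tv_sec * 1000000000 + tv_nsec, and bch2_time_to_timespec()
+ * inverts it exactly via div_s64_rem().
+ */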
+
+static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
+{
+ return dev < c->sb.nr_devices && c->devs[dev];
+}
+
+#define BKEY_PADDED_ONSTACK(key, pad) \
+ struct { struct bkey_i key; __u64 key ## _pad[pad]; }
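+
+/*
+ * Usage sketch: this declares an anonymous struct with @pad u64s of value
+ * space following the bkey_i, e.g.
+ *
+ *	BKEY_PADDED_ONSTACK(k, 8) tmp;
+ *	bkey_init(&tmp.k.k);
+ *
+ * tmp.k can then hold a key with up to 8 u64s of value.
+ */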
+
+#endif /* _BCACHEFS_H */
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
new file mode 100644
index 000000000000..99749f3315fe
--- /dev/null
+++ b/fs/bcachefs/bcachefs_format.h
@@ -0,0 +1,2413 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FORMAT_H
+#define _BCACHEFS_FORMAT_H
+
+/*
+ * bcachefs on disk data structures
+ *
+ * OVERVIEW:
+ *
+ * There are three main types of on disk data structures in bcachefs (this is
+ * reduced from 5 in bcache)
+ *
+ * - superblock
+ * - journal
+ * - btree
+ *
+ * The btree is the primary structure; most metadata exists as keys in the
+ * various btrees. There are only a small number of btrees, they're not
+ * sharded - we have one btree for extents, another for inodes, et cetera.
+ *
+ * SUPERBLOCK:
+ *
+ * The superblock contains the location of the journal, the list of devices in
+ * the filesystem, and in general any metadata we need in order to decide
+ * whether we can start a filesystem or prior to reading the journal/btree
+ * roots.
+ *
+ * The superblock is extensible, and most of the contents of the superblock are
+ * in variable length, type tagged fields; see struct bch_sb_field.
+ *
+ * Backup superblocks do not reside in a fixed location; also, superblocks do
+ * not have a fixed size. To locate backup superblocks we have struct
+ * bch_sb_layout; we store a copy of this inside every superblock, and also
+ * before the first superblock.
+ *
+ * JOURNAL:
+ *
+ * The journal primarily records btree updates in the order they occurred;
+ * journal replay consists of just iterating over all the keys in the open
+ * journal entries and re-inserting them into the btrees.
+ *
+ * The journal also contains entry types for the btree roots, and blacklisted
+ * journal sequence numbers (see journal_seq_blacklist.c).
+ *
+ * BTREE:
+ *
+ * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
+ * 128k-256k) and log structured. We use struct btree_node for writing the first
+ * entry in a given node (offset 0), and struct btree_node_entry for all
+ * subsequent writes.
+ *
+ * After the header, btree node entries contain a list of keys in sorted order.
+ * Values are stored inline with the keys; since values are variable length (and
+ * keys effectively are variable length too, due to packing) we can't do random
+ * access without building up additional in memory tables in the btree node read
+ * path.
+ *
+ * BTREE KEYS (struct bkey):
+ *
+ * The various btrees share a common format for the key - so as to avoid
+ * switching in fastpath lookup/comparison code - but define their own
+ * structures for the key values.
+ *
+ * The size of a key/value pair is stored as a u8 in units of u64s, so the max
+ * size is just under 2k. The common part also contains a type tag for the
+ * value, and a format field indicating whether the key is packed or not (and
+ * also meant to allow adding new key fields in the future, if desired).
+ *
+ * bkeys, when stored within a btree node, may also be packed. In that case, the
+ * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
+ * be generous with field sizes in the common part of the key format (64 bit
+ * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
+ */
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/kernel.h>
+#include <linux/uuid.h>
+#include "vstructs.h"
+
+#ifdef __KERNEL__
+typedef uuid_t __uuid_t;
+#endif
+
+#define BITMASK(name, type, field, offset, end) \
+static const __maybe_unused unsigned name##_OFFSET = offset; \
+static const __maybe_unused unsigned name##_BITS = (end - offset); \
+ \
+static inline __u64 name(const type *k) \
+{ \
+ return (k->field >> offset) & ~(~0ULL << (end - offset)); \
+} \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << (end - offset)) << offset); \
+ k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
+}
+
+#define LE_BITMASK(_bits, name, type, field, offset, end) \
+static const __maybe_unused unsigned name##_OFFSET = offset; \
+static const __maybe_unused unsigned name##_BITS = (end - offset); \
+static const __maybe_unused __u##_bits name##_MAX = (1ULL << (end - offset)) - 1;\
+ \
+static inline __u64 name(const type *k) \
+{ \
+ return (__le##_bits##_to_cpu(k->field) >> offset) & \
+ ~(~0ULL << (end - offset)); \
+} \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ __u##_bits new = __le##_bits##_to_cpu(k->field); \
+ \
+ new &= ~(~(~0ULL << (end - offset)) << offset); \
+ new |= (v & ~(~0ULL << (end - offset))) << offset; \
+ k->field = __cpu_to_le##_bits(new); \
+}
+
+#define LE16_BITMASK(n, t, f, o, e) LE_BITMASK(16, n, t, f, o, e)
+#define LE32_BITMASK(n, t, f, o, e) LE_BITMASK(32, n, t, f, o, e)
+#define LE64_BITMASK(n, t, f, o, e) LE_BITMASK(64, n, t, f, o, e)
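+
+/*
+ * For illustration: LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2)
+ * (defined later in this file) generates
+ *
+ *	static inline __u64 BCH_SB_CLEAN(const struct bch_sb *k);
+ *	static inline void SET_BCH_SB_CLEAN(struct bch_sb *k, __u64 v);
+ *
+ * a getter/setter pair for bit 1 that does the little-endian conversion
+ * internally, so callers never touch the raw __le64 field.
+ */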
+
+struct bkey_format {
+ __u8 key_u64s;
+ __u8 nr_fields;
+ /* One unused slot for now: */
+ __u8 bits_per_field[6];
+ __le64 field_offset[6];
+};
+
+/* Btree keys - all units are in sectors */
+
+struct bpos {
+ /*
+ * Word order matches machine byte order - btree code treats a bpos as a
+ * single large integer, for search/comparison purposes
+ *
+ * Note that wherever a bpos is embedded in another on disk data
+ * structure, it has to be byte swabbed when reading in metadata that
+ * wasn't written in native endian order:
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ __u32 snapshot;
+ __u64 offset;
+ __u64 inode;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __u64 inode;
+ __u64 offset; /* Points to end of extent - sectors */
+ __u32 snapshot;
+#else
+#error edit for your odd byteorder.
+#endif
+} __packed __aligned(4);
+
+#define KEY_INODE_MAX ((__u64)~0ULL)
+#define KEY_OFFSET_MAX ((__u64)~0ULL)
+#define KEY_SNAPSHOT_MAX ((__u32)~0U)
+#define KEY_SIZE_MAX ((__u32)~0U)
+
+static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
+{
+ return (struct bpos) {
+ .inode = inode,
+ .offset = offset,
+ .snapshot = snapshot,
+ };
+}
+
+#define POS_MIN SPOS(0, 0, 0)
+#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
+#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
+#define POS(_inode, _offset) SPOS(_inode, _offset, 0)
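+
+/*
+ * Note: with the fields in machine word order, a bpos compares as a single
+ * 160 bit integer - inode most significant, then offset, then snapshot -
+ * e.g. SPOS(1, 0, 0) sorts after SPOS(0, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX).
+ */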
+
+/* Empty placeholder struct, for container_of() */
+struct bch_val {
+ __u64 __nothing[0];
+};
+
+struct bversion {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ __u64 lo;
+ __u32 hi;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __u32 hi;
+ __u64 lo;
+#endif
+} __packed __aligned(4);
+
+struct bkey {
+ /* Size of combined key and value, in u64s */
+ __u8 u64s;
+
+ /* Format of key (0 for format local to btree node) */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 format:7,
+ needs_whiteout:1;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 needs_whiteout:1,
+ format:7;
+#else
+#error edit for your odd byteorder.
+#endif
+
+ /* Type of the value */
+ __u8 type;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ __u8 pad[1];
+
+ struct bversion version;
+ __u32 size; /* extent size, in sectors */
+ struct bpos p;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ struct bpos p;
+ __u32 size; /* extent size, in sectors */
+ struct bversion version;
+
+ __u8 pad[1];
+#endif
+} __packed __aligned(8);
+
+struct bkey_packed {
+ __u64 _data[0];
+
+ /* Size of combined key and value, in u64s */
+ __u8 u64s;
+
+ /* Format of key (0 for format local to btree node) */
+
+ /*
+ * XXX: next incompat on disk format change, switch format and
+ * needs_whiteout - bkey_packed() will be cheaper if format is the high
+ * bits of the bitfield
+ */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 format:7,
+ needs_whiteout:1;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u8 needs_whiteout:1,
+ format:7;
+#endif
+
+ /* Type of the value */
+ __u8 type;
+ __u8 key_start[0];
+
+ /*
+ * We copy bkeys with struct assignment in various places, and while
+ * that shouldn't be done with packed bkeys we can't disallow it in C,
+ * and it's legal to cast a bkey to a bkey_packed - so padding it out
+ * to the same size as struct bkey should hopefully be safest.
+ */
+ __u8 pad[sizeof(struct bkey) - 3];
+} __packed __aligned(8);
+
+typedef struct {
+ __le64 lo;
+ __le64 hi;
+} bch_le128;
+
+#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
+#define BKEY_U64s_MAX U8_MAX
+#define BKEY_VAL_U64s_MAX (BKEY_U64s_MAX - BKEY_U64s)
+
+#define KEY_PACKED_BITS_START 24
+
+#define KEY_FORMAT_LOCAL_BTREE 0
+#define KEY_FORMAT_CURRENT 1
+
+enum bch_bkey_fields {
+ BKEY_FIELD_INODE,
+ BKEY_FIELD_OFFSET,
+ BKEY_FIELD_SNAPSHOT,
+ BKEY_FIELD_SIZE,
+ BKEY_FIELD_VERSION_HI,
+ BKEY_FIELD_VERSION_LO,
+ BKEY_NR_FIELDS,
+};
+
+#define bkey_format_field(name, field) \
+ [BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
+
+#define BKEY_FORMAT_CURRENT \
+((struct bkey_format) { \
+ .key_u64s = BKEY_U64s, \
+ .nr_fields = BKEY_NR_FIELDS, \
+ .bits_per_field = { \
+ bkey_format_field(INODE, p.inode), \
+ bkey_format_field(OFFSET, p.offset), \
+ bkey_format_field(SNAPSHOT, p.snapshot), \
+ bkey_format_field(SIZE, size), \
+ bkey_format_field(VERSION_HI, version.hi), \
+ bkey_format_field(VERSION_LO, version.lo), \
+ }, \
+})
+
+/* bkey with inline value */
+struct bkey_i {
+ __u64 _data[0];
+
+ struct bkey k;
+ struct bch_val v;
+};
+
+#define KEY(_inode, _offset, _size) \
+((struct bkey) { \
+ .u64s = BKEY_U64s, \
+ .format = KEY_FORMAT_CURRENT, \
+ .p = POS(_inode, _offset), \
+ .size = _size, \
+})
+
+static inline void bkey_init(struct bkey *k)
+{
+ *k = KEY(0, 0, 0);
+}
+
+#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
+
+#define __BKEY_PADDED(key, pad) \
+ struct bkey_i key; __u64 key ## _pad[pad]
+
+/*
+ * - DELETED keys are used internally to mark keys that should be ignored but
+ * override keys in composition order. Their version number is ignored.
+ *
+ * - DISCARDED keys indicate that the data is all 0s because it has been
+ * discarded. DISCARDs may have a version; if the version is nonzero the key
+ * will be persistent, otherwise the key will be dropped whenever the btree
+ * node is rewritten (like DELETED keys).
+ *
+ * - ERROR: any read of the data returns a read error, as the data was lost due
+ * to a failing device. Like DISCARDED keys, they can be removed (overridden)
+ * by new writes or cluster-wide GC. Node repair can also overwrite them with
+ * the same or a more recent version number, but not with an older version
+ * number.
+ *
+ * - WHITEOUT: for hash table btrees
+ */
+#define BCH_BKEY_TYPES() \
+ x(deleted, 0) \
+ x(whiteout, 1) \
+ x(error, 2) \
+ x(cookie, 3) \
+ x(hash_whiteout, 4) \
+ x(btree_ptr, 5) \
+ x(extent, 6) \
+ x(reservation, 7) \
+ x(inode, 8) \
+ x(inode_generation, 9) \
+ x(dirent, 10) \
+ x(xattr, 11) \
+ x(alloc, 12) \
+ x(quota, 13) \
+ x(stripe, 14) \
+ x(reflink_p, 15) \
+ x(reflink_v, 16) \
+ x(inline_data, 17) \
+ x(btree_ptr_v2, 18) \
+ x(indirect_inline_data, 19) \
+ x(alloc_v2, 20) \
+ x(subvolume, 21) \
+ x(snapshot, 22) \
+ x(inode_v2, 23) \
+ x(alloc_v3, 24) \
+ x(set, 25) \
+ x(lru, 26) \
+ x(alloc_v4, 27) \
+ x(backpointer, 28) \
+ x(inode_v3, 29) \
+ x(bucket_gens, 30) \
+ x(snapshot_tree, 31) \
+ x(logged_op_truncate, 32) \
+ x(logged_op_finsert, 33)
+
+enum bch_bkey_type {
+#define x(name, nr) KEY_TYPE_##name = nr,
+ BCH_BKEY_TYPES()
+#undef x
+ KEY_TYPE_MAX,
+};
+
+struct bch_deleted {
+ struct bch_val v;
+};
+
+struct bch_whiteout {
+ struct bch_val v;
+};
+
+struct bch_error {
+ struct bch_val v;
+};
+
+struct bch_cookie {
+ struct bch_val v;
+ __le64 cookie;
+};
+
+struct bch_hash_whiteout {
+ struct bch_val v;
+};
+
+struct bch_set {
+ struct bch_val v;
+};
+
+/* Extents */
+
+/*
+ * In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
+ * preceded by checksum/compression information (bch_extent_crc32 or
+ * bch_extent_crc64).
+ *
+ * One major determining factor in the format of extents is how we handle and
+ * represent extents that have been partially overwritten and thus trimmed:
+ *
+ * If an extent is not checksummed or compressed, when the extent is trimmed we
+ * don't have to remember the extent we originally allocated and wrote: we can
+ * merely adjust ptr->offset to point to the start of the data that is currently
+ * live. The size field in struct bkey records the current (live) size of the
+ * extent, and is also used to mean "size of region on disk that we point to" in
+ * this case.
+ *
+ * Thus an extent that is not checksummed or compressed will consist only of a
+ * list of bch_extent_ptrs, with none of the fields in
+ * bch_extent_crc32/bch_extent_crc64.
+ *
+ * When an extent is checksummed or compressed, it's not possible to read only
+ * the data that is currently live: we have to read the entire extent that was
+ * originally written, and then return only the part of the extent that is
+ * currently live.
+ *
+ * Thus, in addition to the current size of the extent in struct bkey, we need
+ * to store the size of the originally allocated space - this is the
+ * compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
+ * when the extent is trimmed, instead of modifying the offset field of the
+ * pointer, we keep a second smaller offset field - "offset into the original
+ * extent of the currently live region".
+ *
+ * The other major determining factor is replication and data migration:
+ *
+ * Each pointer may have its own bch_extent_crc32/64. When doing a replicated
+ * write, we will initially write all the replicas in the same format, with the
+ * same checksum type and compression format - however, when copygc runs later (or
+ * tiering/cache promotion, anything that moves data), it is not in general
+ * going to rewrite all the pointers at once - one of the replicas may be in a
+ * bucket on one device that has very little fragmentation while another lives
+ * in a bucket that has become heavily fragmented, and thus is being rewritten
+ * sooner than the rest.
+ *
+ * Thus it will only move a subset of the pointers (or in the case of
+ * tiering/cache promotion perhaps add a single pointer without dropping any
+ * current pointers), and if the extent has been partially overwritten it must
+ * write only the currently live portion (or copygc would not be able to reduce
+ * fragmentation!) - which necessitates a different bch_extent_crc format for
+ * the new pointer.
+ *
+ * But in the interests of space efficiency, we don't want to store one
+ * bch_extent_crc for each pointer if we don't have to.
+ *
+ * Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
+ * bch_extent_ptrs appended arbitrarily one after the other. We determine the
+ * type of a given entry with a scheme similar to utf8 (except we're encoding a
+ * type, not a size), encoding the type in the position of the first set bit:
+ *
+ * bch_extent_ptr - 0b1
+ * bch_extent_crc32 - 0b10
+ * bch_extent_crc64 - 0b100
+ *
+ * We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
+ * bch_extent_crc64 is the least constrained).
+ *
+ * Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
+ * until the next bch_extent_crc32/64.
+ *
+ * If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
+ * is neither checksummed nor compressed.
+ */
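+
+/*
+ * Decoding sketch (extent_entry_type() here is a hypothetical name, not the
+ * helper this change adds): since the entry type is encoded in the position
+ * of the first set bit, decoding is a find-first-set on the type word of the
+ * union defined below:
+ *
+ *	static inline unsigned extent_entry_type(const union bch_extent_entry *e)
+ *	{
+ *		return __ffs(e->type);
+ *	}
+ *
+ * 0b1 yields BCH_EXTENT_ENTRY_ptr (0), 0b10 yields BCH_EXTENT_ENTRY_crc32
+ * (1), and so on, matching the enum below.
+ */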
+
+/* 128 bits, sufficient for cryptographic MACs: */
+struct bch_csum {
+ __le64 lo;
+ __le64 hi;
+} __packed __aligned(8);
+
+#define BCH_EXTENT_ENTRY_TYPES() \
+ x(ptr, 0) \
+ x(crc32, 1) \
+ x(crc64, 2) \
+ x(crc128, 3) \
+ x(stripe_ptr, 4) \
+ x(rebalance, 5)
+#define BCH_EXTENT_ENTRY_MAX 6
+
+enum bch_extent_entry_type {
+#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
+ BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+/* Compressed/uncompressed size are stored biased by 1: */
+struct bch_extent_crc32 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u32 type:2,
+ _compressed_size:7,
+ _uncompressed_size:7,
+ offset:7,
+ _unused:1,
+ csum_type:4,
+ compression_type:4;
+ __u32 csum;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u32 csum;
+ __u32 compression_type:4,
+ csum_type:4,
+ _unused:1,
+ offset:7,
+ _uncompressed_size:7,
+ _compressed_size:7,
+ type:2;
+#endif
+} __packed __aligned(8);
+
+#define CRC32_SIZE_MAX (1U << 7)
+#define CRC32_NONCE_MAX 0
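+
+/*
+ * "Biased by 1" means the stored value is (size - 1), so the 7 bit
+ * _compressed_size/_uncompressed_size fields encode 1..128 sectors - hence
+ * CRC32_SIZE_MAX == 1U << 7.
+ */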
+
+struct bch_extent_crc64 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:3,
+ _compressed_size:9,
+ _uncompressed_size:9,
+ offset:9,
+ nonce:10,
+ csum_type:4,
+ compression_type:4,
+ csum_hi:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 csum_hi:16,
+ compression_type:4,
+ csum_type:4,
+ nonce:10,
+ offset:9,
+ _uncompressed_size:9,
+ _compressed_size:9,
+ type:3;
+#endif
+ __u64 csum_lo;
+} __packed __aligned(8);
+
+#define CRC64_SIZE_MAX (1U << 9)
+#define CRC64_NONCE_MAX ((1U << 10) - 1)
+
+struct bch_extent_crc128 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:4,
+ _compressed_size:13,
+ _uncompressed_size:13,
+ offset:13,
+ nonce:13,
+ csum_type:4,
+ compression_type:4;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 compression_type:4,
+ csum_type:4,
+ nonce:13,
+ offset:13,
+ _uncompressed_size:13,
+ _compressed_size:13,
+ type:4;
+#endif
+ struct bch_csum csum;
+} __packed __aligned(8);
+
+#define CRC128_SIZE_MAX (1U << 13)
+#define CRC128_NONCE_MAX ((1U << 13) - 1)
+
+/*
+ * @reservation - pointer hasn't been written to, just reserved
+ */
+struct bch_extent_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:1,
+ cached:1,
+ unused:1,
+ unwritten:1,
+ offset:44, /* 8 petabytes */
+ dev:8,
+ gen:8;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 gen:8,
+ dev:8,
+ offset:44,
+ unwritten:1,
+ unused:1,
+ cached:1,
+ type:1;
+#endif
+} __packed __aligned(8);
+
+struct bch_extent_stripe_ptr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:5,
+ block:8,
+ redundancy:4,
+ idx:47;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 idx:47,
+ redundancy:4,
+ block:8,
+ type:5;
+#endif
+};
+
+struct bch_extent_reservation {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:6,
+ unused:22,
+ replicas:4,
+ generation:32;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 generation:32,
+ replicas:4,
+ unused:22,
+ type:6;
+#endif
+};
+
+struct bch_extent_rebalance {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:7,
+ unused:33,
+ compression:8,
+ target:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 target:16,
+ compression:8,
+ unused:33,
+ type:7;
+#endif
+};
+
+union bch_extent_entry {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
+ unsigned long type;
+#elif __BITS_PER_LONG == 32
+ struct {
+ unsigned long pad;
+ unsigned long type;
+ };
+#else
+#error edit for your odd byteorder.
+#endif
+
+#define x(f, n) struct bch_extent_##f f;
+ BCH_EXTENT_ENTRY_TYPES()
+#undef x
+};
+
+struct bch_btree_ptr {
+ struct bch_val v;
+
+ __u64 _data[0];
+ struct bch_extent_ptr start[];
+} __packed __aligned(8);
+
+struct bch_btree_ptr_v2 {
+ struct bch_val v;
+
+ __u64 mem_ptr;
+ __le64 seq;
+ __le16 sectors_written;
+ __le16 flags;
+ struct bpos min_key;
+ __u64 _data[0];
+ struct bch_extent_ptr start[];
+} __packed __aligned(8);
+
+LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
+
+struct bch_extent {
+ struct bch_val v;
+
+ __u64 _data[0];
+ union bch_extent_entry start[];
+} __packed __aligned(8);
+
+struct bch_reservation {
+ struct bch_val v;
+
+ __le32 generation;
+ __u8 nr_replicas;
+ __u8 pad[3];
+} __packed __aligned(8);
+
+/* Maximum size (in u64s) a single pointer could be: */
+#define BKEY_EXTENT_PTR_U64s_MAX\
+ ((sizeof(struct bch_extent_crc128) + \
+ sizeof(struct bch_extent_ptr)) / sizeof(__u64))
+
+/* Maximum possible size of an entire extent value: */
+#define BKEY_EXTENT_VAL_U64s_MAX \
+ (1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
+
+/* Maximum possible size of an entire extent, key + value: */
+#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
+
+/* Btree pointers don't carry around checksums: */
+#define BKEY_BTREE_PTR_VAL_U64s_MAX \
+ ((sizeof(struct bch_btree_ptr_v2) + \
+ sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
+#define BKEY_BTREE_PTR_U64s_MAX \
+ (BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
+
+/* Inodes */
+
+#define BLOCKDEV_INODE_MAX 4096
+
+#define BCACHEFS_ROOT_INO 4096
+
+struct bch_inode {
+ struct bch_val v;
+
+ __le64 bi_hash_seed;
+ __le32 bi_flags;
+ __le16 bi_mode;
+ __u8 fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v2 {
+ struct bch_val v;
+
+ __le64 bi_journal_seq;
+ __le64 bi_hash_seed;
+ __le64 bi_flags;
+ __le16 bi_mode;
+ __u8 fields[];
+} __packed __aligned(8);
+
+struct bch_inode_v3 {
+ struct bch_val v;
+
+ __le64 bi_journal_seq;
+ __le64 bi_hash_seed;
+ __le64 bi_flags;
+ __le64 bi_sectors;
+ __le64 bi_size;
+ __le64 bi_version;
+ __u8 fields[];
+} __packed __aligned(8);
+
+#define INODEv3_FIELDS_START_INITIAL 6
+#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
+
+struct bch_inode_generation {
+ struct bch_val v;
+
+ __le32 bi_generation;
+ __le32 pad;
+} __packed __aligned(8);
+
+/*
+ * bi_subvol and bi_parent_subvol are only set for subvolume roots:
+ */
+
+#define BCH_INODE_FIELDS_v2() \
+ x(bi_atime, 96) \
+ x(bi_ctime, 96) \
+ x(bi_mtime, 96) \
+ x(bi_otime, 96) \
+ x(bi_size, 64) \
+ x(bi_sectors, 64) \
+ x(bi_uid, 32) \
+ x(bi_gid, 32) \
+ x(bi_nlink, 32) \
+ x(bi_generation, 32) \
+ x(bi_dev, 32) \
+ x(bi_data_checksum, 8) \
+ x(bi_compression, 8) \
+ x(bi_project, 32) \
+ x(bi_background_compression, 8) \
+ x(bi_data_replicas, 8) \
+ x(bi_promote_target, 16) \
+ x(bi_foreground_target, 16) \
+ x(bi_background_target, 16) \
+ x(bi_erasure_code, 16) \
+ x(bi_fields_set, 16) \
+ x(bi_dir, 64) \
+ x(bi_dir_offset, 64) \
+ x(bi_subvol, 32) \
+ x(bi_parent_subvol, 32)
+
+#define BCH_INODE_FIELDS_v3() \
+ x(bi_atime, 96) \
+ x(bi_ctime, 96) \
+ x(bi_mtime, 96) \
+ x(bi_otime, 96) \
+ x(bi_uid, 32) \
+ x(bi_gid, 32) \
+ x(bi_nlink, 32) \
+ x(bi_generation, 32) \
+ x(bi_dev, 32) \
+ x(bi_data_checksum, 8) \
+ x(bi_compression, 8) \
+ x(bi_project, 32) \
+ x(bi_background_compression, 8) \
+ x(bi_data_replicas, 8) \
+ x(bi_promote_target, 16) \
+ x(bi_foreground_target, 16) \
+ x(bi_background_target, 16) \
+ x(bi_erasure_code, 16) \
+ x(bi_fields_set, 16) \
+ x(bi_dir, 64) \
+ x(bi_dir_offset, 64) \
+ x(bi_subvol, 32) \
+ x(bi_parent_subvol, 32) \
+ x(bi_nocow, 8)
+
+/* subset of BCH_INODE_FIELDS */
+#define BCH_INODE_OPTS() \
+ x(data_checksum, 8) \
+ x(compression, 8) \
+ x(project, 32) \
+ x(background_compression, 8) \
+ x(data_replicas, 8) \
+ x(promote_target, 16) \
+ x(foreground_target, 16) \
+ x(background_target, 16) \
+ x(erasure_code, 16) \
+ x(nocow, 8)
+
+enum inode_opt_id {
+#define x(name, ...) \
+ Inode_opt_##name,
+ BCH_INODE_OPTS()
+#undef x
+ Inode_opt_nr,
+};
+
+enum {
+ /*
+ * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
+ * flags)
+ */
+ __BCH_INODE_SYNC = 0,
+ __BCH_INODE_IMMUTABLE = 1,
+ __BCH_INODE_APPEND = 2,
+ __BCH_INODE_NODUMP = 3,
+ __BCH_INODE_NOATIME = 4,
+
+ __BCH_INODE_I_SIZE_DIRTY = 5, /* obsolete */
+ __BCH_INODE_I_SECTORS_DIRTY = 6, /* obsolete */
+ __BCH_INODE_UNLINKED = 7,
+ __BCH_INODE_BACKPTR_UNTRUSTED = 8,
+
+ /* bits 20+ reserved for packed fields below: */
+};
+
+#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
+#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
+#define BCH_INODE_APPEND (1 << __BCH_INODE_APPEND)
+#define BCH_INODE_NODUMP (1 << __BCH_INODE_NODUMP)
+#define BCH_INODE_NOATIME (1 << __BCH_INODE_NOATIME)
+#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
+#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
+#define BCH_INODE_UNLINKED (1 << __BCH_INODE_UNLINKED)
+#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
+
+LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
+LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
+LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
+
+LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
+LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
+LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
+
+LE64_BITMASK(INODEv3_FIELDS_START,
+ struct bch_inode_v3, bi_flags, 31, 36);
+LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
+
+/* Dirents */
+
+/*
+ * Dirents (and xattrs) have to implement string lookups; since our b-tree
+ * doesn't support arbitrary length strings for the key, we instead index by a
+ * 64 bit hash (currently truncated sha1) of the string, stored in the offset
+ * field of the key - using linear probing to resolve hash collisions. This also
+ * provides us with the readdir cookie POSIX requires.
+ *
+ * Linear probing requires us to use whiteouts for deletions, in the event of a
+ * collision:
+ */
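+
+/*
+ * Lookup sketch: hash the name to h, then probe offsets h, h+1, h+2, ...
+ * comparing names until an empty slot is hit. This is why deletions must
+ * leave whiteouts behind: treating a deleted slot as empty would terminate
+ * the probe chain early and hide entries inserted past it.
+ */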
+
+struct bch_dirent {
+ struct bch_val v;
+
+ /* Target inode number: */
+ union {
+ __le64 d_inum;
+ struct { /* DT_SUBVOL */
+ __le32 d_child_subvol;
+ __le32 d_parent_subvol;
+ };
+ };
+
+ /*
+ * Copy of mode bits 12-15 from the target inode - so userspace can get
+ * the filetype without having to do a stat()
+ */
+ __u8 d_type;
+
+ __u8 d_name[];
+} __packed __aligned(8);
+
+#define DT_SUBVOL 16
+#define BCH_DT_MAX 17
+
+#define BCH_NAME_MAX 512
+
+/* Xattrs */
+
+#define KEY_TYPE_XATTR_INDEX_USER 0
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
+#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
+#define KEY_TYPE_XATTR_INDEX_SECURITY 4
+
+struct bch_xattr {
+ struct bch_val v;
+ __u8 x_type;
+ __u8 x_name_len;
+ __le16 x_val_len;
+ __u8 x_name[];
+} __packed __aligned(8);
+
+/* Bucket/allocation information: */
+
+struct bch_alloc {
+ struct bch_val v;
+ __u8 fields;
+ __u8 gen;
+ __u8 data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V1() \
+ x(read_time, 16) \
+ x(write_time, 16) \
+ x(data_type, 8) \
+ x(dirty_sectors, 16) \
+ x(cached_sectors, 16) \
+ x(oldest_gen, 8) \
+ x(stripe, 32) \
+ x(stripe_redundancy, 8)
+
+enum {
+#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
+ BCH_ALLOC_FIELDS_V1()
+#undef x
+};
+
+struct bch_alloc_v2 {
+ struct bch_val v;
+ __u8 nr_fields;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 data[];
+} __packed __aligned(8);
+
+#define BCH_ALLOC_FIELDS_V2() \
+ x(read_time, 64) \
+ x(write_time, 64) \
+ x(dirty_sectors, 32) \
+ x(cached_sectors, 32) \
+ x(stripe, 32) \
+ x(stripe_redundancy, 8)
+
+struct bch_alloc_v3 {
+ struct bch_val v;
+ __le64 journal_seq;
+ __le32 flags;
+ __u8 nr_fields;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
+LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
+
+struct bch_alloc_v4 {
+ struct bch_val v;
+ __u64 journal_seq;
+ __u32 flags;
+ __u8 gen;
+ __u8 oldest_gen;
+ __u8 data_type;
+ __u8 stripe_redundancy;
+ __u32 dirty_sectors;
+ __u32 cached_sectors;
+ __u64 io_time[2];
+ __u32 stripe;
+ __u32 nr_external_backpointers;
+ __u64 fragmentation_lru;
+} __packed __aligned(8);
+
+#define BCH_ALLOC_V4_U64s_V0 6
+#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
+
+BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
+BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
+BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
+BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
+
+#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX 40
+
+struct bch_backpointer {
+ struct bch_val v;
+ __u8 btree_id;
+ __u8 level;
+ __u8 data_type;
+ __u64 bucket_offset:40;
+ __u32 bucket_len;
+ struct bpos pos;
+} __packed __aligned(8);
+
+#define KEY_TYPE_BUCKET_GENS_BITS 8
+#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
+#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
+
+struct bch_bucket_gens {
+ struct bch_val v;
+ u8 gens[KEY_TYPE_BUCKET_GENS_NR];
+} __packed __aligned(8);
+
+/* Quotas: */
+
+enum quota_types {
+ QTYP_USR = 0,
+ QTYP_GRP = 1,
+ QTYP_PRJ = 2,
+ QTYP_NR = 3,
+};
+
+enum quota_counters {
+ Q_SPC = 0,
+ Q_INO = 1,
+ Q_COUNTERS = 2,
+};
+
+struct bch_quota_counter {
+ __le64 hardlimit;
+ __le64 softlimit;
+};
+
+struct bch_quota {
+ struct bch_val v;
+ struct bch_quota_counter c[Q_COUNTERS];
+} __packed __aligned(8);
+
+/* Erasure coding */
+
+struct bch_stripe {
+ struct bch_val v;
+ __le16 sectors;
+ __u8 algorithm;
+ __u8 nr_blocks;
+ __u8 nr_redundant;
+
+ __u8 csum_granularity_bits;
+ __u8 csum_type;
+ __u8 pad;
+
+ struct bch_extent_ptr ptrs[];
+} __packed __aligned(8);
+
+/* Reflink: */
+
+struct bch_reflink_p {
+ struct bch_val v;
+ __le64 idx;
+ /*
+ * A reflink pointer might point to an indirect extent which is then
+ * later split (by copygc or rebalance). If we only pointed to part of
+ * the original indirect extent, and then one of the fragments is
+ * outside the range we point to, we'd leak a refcount: so when creating
+ * reflink pointers, we need to store pad values to remember the full
+ * range we were taking a reference on.
+ */
+ __le32 front_pad;
+ __le32 back_pad;
+} __packed __aligned(8);
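+
+/*
+ * Worked example (a sketch of the intent described above): suppose a
+ * reference was originally taken on indirect extent range [0, 32) and the
+ * pointer was later trimmed to the live range [8, 16), i.e. a key size of 8
+ * sectors. Then idx == 8, front_pad == 8, back_pad == 16, and the refcounted
+ * range [idx - front_pad, idx + size + back_pad) is still the full [0, 32) -
+ * so refcounts balance even if the indirect extent is split outside the live
+ * range.
+ */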
+
+struct bch_reflink_v {
+ struct bch_val v;
+ __le64 refcount;
+ union bch_extent_entry start[0];
+ __u64 _data[];
+} __packed __aligned(8);
+
+struct bch_indirect_inline_data {
+ struct bch_val v;
+ __le64 refcount;
+ u8 data[];
+};
+
+/* Inline data */
+
+struct bch_inline_data {
+ struct bch_val v;
+ u8 data[];
+};
+
+/* Subvolumes: */
+
+#define SUBVOL_POS_MIN POS(0, 1)
+#define SUBVOL_POS_MAX POS(0, S32_MAX)
+#define BCACHEFS_ROOT_SUBVOL 1
+
+struct bch_subvolume {
+ struct bch_val v;
+ __le32 flags;
+ __le32 snapshot;
+ __le64 inode;
+ /*
+ * Snapshot subvolumes form a tree, separate from the snapshot nodes
+ * tree - if this subvolume is a snapshot, this is the ID of the
+ * subvolume it was created from:
+ */
+ __le32 parent;
+ __le32 pad;
+ bch_le128 otime;
+};
+
+LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
+/*
+ * We need to know whether a subvolume is a snapshot so we can know whether we
+ * can delete it (or whether it should just be rm -rf'd)
+ */
+LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
+LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3)
+
+/* Snapshots */
+
+struct bch_snapshot {
+ struct bch_val v;
+ __le32 flags;
+ __le32 parent;
+ __le32 children[2];
+ __le32 subvol;
+ /* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
+ __le32 tree;
+ __le32 depth;
+ __le32 skip[3];
+};
+
+LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
+
+/* True if a subvolume points to this snapshot node: */
+LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
+
+/*
+ * Snapshot trees:
+ *
+ * The snapshot_trees btree gives us a persistent identifier for each tree of
+ * bch_snapshot nodes, and allows us to record and easily find the root/master
+ * subvolume that other snapshots were created from:
+ */
+struct bch_snapshot_tree {
+ struct bch_val v;
+ __le32 master_subvol;
+ __le32 root_snapshot;
+};
+
+/* LRU btree: */
+
+struct bch_lru {
+ struct bch_val v;
+ __le64 idx;
+} __packed __aligned(8);
+
+#define LRU_ID_STRIPES (1U << 16)
+
+/* Logged operations btree: */
+
+struct bch_logged_op_truncate {
+ struct bch_val v;
+ __le32 subvol;
+ __le32 pad;
+ __le64 inum;
+ __le64 new_i_size;
+};
+
+enum logged_op_finsert_state {
+ LOGGED_OP_FINSERT_start,
+ LOGGED_OP_FINSERT_shift_extents,
+ LOGGED_OP_FINSERT_finish,
+};
+
+struct bch_logged_op_finsert {
+ struct bch_val v;
+ __u8 state;
+ __u8 pad[3];
+ __le32 subvol;
+ __le64 inum;
+ __le64 dst_offset;
+ __le64 src_offset;
+ __le64 pos;
+};
+
+/* Optional/variable size superblock sections: */
+
+struct bch_sb_field {
+ __u64 _data[0];
+ __le32 u64s;
+ __le32 type;
+};
+
+#define BCH_SB_FIELDS() \
+ x(journal, 0) \
+ x(members_v1, 1) \
+ x(crypt, 2) \
+ x(replicas_v0, 3) \
+ x(quota, 4) \
+ x(disk_groups, 5) \
+ x(clean, 6) \
+ x(replicas, 7) \
+ x(journal_seq_blacklist, 8) \
+ x(journal_v2, 9) \
+ x(counters, 10) \
+ x(members_v2, 11)
+
+enum bch_sb_field_type {
+#define x(f, nr) BCH_SB_FIELD_##f = nr,
+ BCH_SB_FIELDS()
+#undef x
+ BCH_SB_FIELD_NR
+};
+
+/*
+ * Most superblock fields are replicated in all devices' superblocks - a few are
+ * not:
+ */
+#define BCH_SINGLE_DEVICE_SB_FIELDS \
+ ((1U << BCH_SB_FIELD_journal)| \
+ (1U << BCH_SB_FIELD_journal_v2))
+
+/* BCH_SB_FIELD_journal: */
+
+struct bch_sb_field_journal {
+ struct bch_sb_field field;
+ __le64 buckets[];
+};
+
+struct bch_sb_field_journal_v2 {
+ struct bch_sb_field field;
+
+ struct bch_sb_field_journal_v2_entry {
+ __le64 start;
+ __le64 nr;
+ } d[];
+};
+
+/* BCH_SB_FIELD_members_v1: */
+
+#define BCH_MIN_NR_NBUCKETS (1 << 6)
+
+#define BCH_IOPS_MEASUREMENTS() \
+ x(seqread, 0) \
+ x(seqwrite, 1) \
+ x(randread, 2) \
+ x(randwrite, 3)
+
+enum bch_iops_measurement {
+#define x(t, n) BCH_IOPS_##t = n,
+ BCH_IOPS_MEASUREMENTS()
+#undef x
+ BCH_IOPS_NR
+};
+
+struct bch_member {
+ __uuid_t uuid;
+ __le64 nbuckets; /* device size */
+ __le16 first_bucket; /* index of first bucket used */
+ __le16 bucket_size; /* sectors */
+ __le32 pad;
+ __le64 last_mount; /* time_t */
+
+ __le64 flags;
+ __le32 iops[4];
+};
+
+#define BCH_MEMBER_V1_BYTES 56
+
+LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
+/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
+LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags, 14, 15)
+LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags, 15, 20)
+LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
+LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
+LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
+ struct bch_member, flags, 30, 31)
+
+#if 0
+LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
+LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
+#endif
+
+#define BCH_MEMBER_STATES() \
+ x(rw, 0) \
+ x(ro, 1) \
+ x(failed, 2) \
+ x(spare, 3)
+
+enum bch_member_state {
+#define x(t, n) BCH_MEMBER_STATE_##t = n,
+ BCH_MEMBER_STATES()
+#undef x
+ BCH_MEMBER_STATE_NR
+};
+
+struct bch_sb_field_members_v1 {
+ struct bch_sb_field field;
+ struct bch_member _members[]; /* members are now variable size */
+};
+
+struct bch_sb_field_members_v2 {
+ struct bch_sb_field field;
+ __le16 member_bytes; /* size of a single member entry */
+ u8 pad[6];
+ struct bch_member _members[];
+};
+
+/* BCH_SB_FIELD_crypt: */
+
+struct nonce {
+ __le32 d[4];
+};
+
+struct bch_key {
+ __le64 key[4];
+};
+
+#define BCH_KEY_MAGIC \
+ (((__u64) 'b' << 0)|((__u64) 'c' << 8)| \
+ ((__u64) 'h' << 16)|((__u64) '*' << 24)| \
+ ((__u64) '*' << 32)|((__u64) 'k' << 40)| \
+ ((__u64) 'e' << 48)|((__u64) 'y' << 56))
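+
+/*
+ * This packs the 8 byte string "bch**key" into a __u64, byte 0 in the low
+ * bits, so the magic reads back as that string when stored little-endian.
+ */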
+
+struct bch_encrypted_key {
+ __le64 magic;
+ struct bch_key key;
+};
+
+/*
+ * If this field is present in the superblock, it stores an encryption key which
+ * is used to encrypt all other data/metadata. The key will normally be encrypted
+ * with the key userspace provides, but if encryption has been turned off we'll
+ * just store the master key unencrypted in the superblock so we can access the
+ * previously encrypted data.
+ */
+struct bch_sb_field_crypt {
+ struct bch_sb_field field;
+
+ __le64 flags;
+ __le64 kdf_flags;
+ struct bch_encrypted_key key;
+};
+
+LE64_BITMASK(BCH_CRYPT_KDF_TYPE, struct bch_sb_field_crypt, flags, 0, 4);
+
+enum bch_kdf_types {
+ BCH_KDF_SCRYPT = 0,
+ BCH_KDF_NR = 1,
+};
+
+/* stored as base 2 log of scrypt params: */
+LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
+LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
+LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
+
+/* BCH_SB_FIELD_replicas: */
+
+#define BCH_DATA_TYPES() \
+ x(free, 0) \
+ x(sb, 1) \
+ x(journal, 2) \
+ x(btree, 3) \
+ x(user, 4) \
+ x(cached, 5) \
+ x(parity, 6) \
+ x(stripe, 7) \
+ x(need_gc_gens, 8) \
+ x(need_discard, 9)
+
+enum bch_data_type {
+#define x(t, n) BCH_DATA_##t,
+ BCH_DATA_TYPES()
+#undef x
+ BCH_DATA_NR
+};
+
+static inline bool data_type_is_empty(enum bch_data_type type)
+{
+ switch (type) {
+ case BCH_DATA_free:
+ case BCH_DATA_need_gc_gens:
+ case BCH_DATA_need_discard:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool data_type_is_hidden(enum bch_data_type type)
+{
+ switch (type) {
+ case BCH_DATA_sb:
+ case BCH_DATA_journal:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct bch_replicas_entry_v0 {
+ __u8 data_type;
+ __u8 nr_devs;
+ __u8 devs[];
+} __packed;
+
+struct bch_sb_field_replicas_v0 {
+ struct bch_sb_field field;
+ struct bch_replicas_entry_v0 entries[];
+} __packed __aligned(8);
+
+struct bch_replicas_entry {
+ __u8 data_type;
+ __u8 nr_devs;
+ __u8 nr_required;
+ __u8 devs[];
+} __packed;
+
+#define replicas_entry_bytes(_i) \
+ (offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
+
+struct bch_sb_field_replicas {
+ struct bch_sb_field field;
+ struct bch_replicas_entry entries[];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_quota: */
+
+struct bch_sb_quota_counter {
+ __le32 timelimit;
+ __le32 warnlimit;
+};
+
+struct bch_sb_quota_type {
+ __le64 flags;
+ struct bch_sb_quota_counter c[Q_COUNTERS];
+};
+
+struct bch_sb_field_quota {
+ struct bch_sb_field field;
+ struct bch_sb_quota_type q[QTYP_NR];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_disk_groups: */
+
+#define BCH_SB_LABEL_SIZE 32
+
+struct bch_disk_group {
+ __u8 label[BCH_SB_LABEL_SIZE];
+ __le64 flags[2];
+} __packed __aligned(8);
+
+LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
+LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
+LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
+
+struct bch_sb_field_disk_groups {
+ struct bch_sb_field field;
+ struct bch_disk_group entries[];
+} __packed __aligned(8);
+
+/* BCH_SB_FIELD_counters */
+
+#define BCH_PERSISTENT_COUNTERS() \
+ x(io_read, 0) \
+ x(io_write, 1) \
+ x(io_move, 2) \
+ x(bucket_invalidate, 3) \
+ x(bucket_discard, 4) \
+ x(bucket_alloc, 5) \
+ x(bucket_alloc_fail, 6) \
+ x(btree_cache_scan, 7) \
+ x(btree_cache_reap, 8) \
+ x(btree_cache_cannibalize, 9) \
+ x(btree_cache_cannibalize_lock, 10) \
+ x(btree_cache_cannibalize_lock_fail, 11) \
+ x(btree_cache_cannibalize_unlock, 12) \
+ x(btree_node_write, 13) \
+ x(btree_node_read, 14) \
+ x(btree_node_compact, 15) \
+ x(btree_node_merge, 16) \
+ x(btree_node_split, 17) \
+ x(btree_node_rewrite, 18) \
+ x(btree_node_alloc, 19) \
+ x(btree_node_free, 20) \
+ x(btree_node_set_root, 21) \
+ x(btree_path_relock_fail, 22) \
+ x(btree_path_upgrade_fail, 23) \
+ x(btree_reserve_get_fail, 24) \
+ x(journal_entry_full, 25) \
+ x(journal_full, 26) \
+ x(journal_reclaim_finish, 27) \
+ x(journal_reclaim_start, 28) \
+ x(journal_write, 29) \
+ x(read_promote, 30) \
+ x(read_bounce, 31) \
+ x(read_split, 33) \
+ x(read_retry, 32) \
+ x(read_reuse_race, 34) \
+ x(move_extent_read, 35) \
+ x(move_extent_write, 36) \
+ x(move_extent_finish, 37) \
+ x(move_extent_fail, 38) \
+ x(move_extent_alloc_mem_fail, 39) \
+ x(copygc, 40) \
+ x(copygc_wait, 41) \
+ x(gc_gens_end, 42) \
+ x(gc_gens_start, 43) \
+ x(trans_blocked_journal_reclaim, 44) \
+ x(trans_restart_btree_node_reused, 45) \
+ x(trans_restart_btree_node_split, 46) \
+ x(trans_restart_fault_inject, 47) \
+ x(trans_restart_iter_upgrade, 48) \
+ x(trans_restart_journal_preres_get, 49) \
+ x(trans_restart_journal_reclaim, 50) \
+ x(trans_restart_journal_res_get, 51) \
+ x(trans_restart_key_cache_key_realloced, 52) \
+ x(trans_restart_key_cache_raced, 53) \
+ x(trans_restart_mark_replicas, 54) \
+ x(trans_restart_mem_realloced, 55) \
+ x(trans_restart_memory_allocation_failure, 56) \
+ x(trans_restart_relock, 57) \
+ x(trans_restart_relock_after_fill, 58) \
+ x(trans_restart_relock_key_cache_fill, 59) \
+ x(trans_restart_relock_next_node, 60) \
+ x(trans_restart_relock_parent_for_fill, 61) \
+ x(trans_restart_relock_path, 62) \
+ x(trans_restart_relock_path_intent, 63) \
+ x(trans_restart_too_many_iters, 64) \
+ x(trans_restart_traverse, 65) \
+ x(trans_restart_upgrade, 66) \
+ x(trans_restart_would_deadlock, 67) \
+ x(trans_restart_would_deadlock_write, 68) \
+ x(trans_restart_injected, 69) \
+ x(trans_restart_key_cache_upgrade, 70) \
+ x(trans_traverse_all, 71) \
+ x(transaction_commit, 72) \
+ x(write_super, 73) \
+ x(trans_restart_would_deadlock_recursion_limit, 74) \
+ x(trans_restart_write_buffer_flush, 75) \
+ x(trans_restart_split_race, 76)
+
+enum bch_persistent_counters {
+#define x(t, n, ...) BCH_COUNTER_##t,
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+ BCH_COUNTER_NR
+};
+
+struct bch_sb_field_counters {
+ struct bch_sb_field field;
+ __le64 d[];
+};
+
+/*
+ * On clean shutdown, store btree roots and current journal sequence number in
+ * the superblock:
+ */
+struct jset_entry {
+ __le16 u64s;
+ __u8 btree_id;
+ __u8 level;
+ __u8 type; /* designates what this jset holds */
+ __u8 pad[3];
+
+ struct bkey_i start[0];
+ __u64 _data[];
+};
+
+struct bch_sb_field_clean {
+ struct bch_sb_field field;
+
+ __le32 flags;
+ __le16 _read_clock; /* no longer used */
+ __le16 _write_clock;
+ __le64 journal_seq;
+
+ struct jset_entry start[0];
+ __u64 _data[];
+};
+
+struct journal_seq_blacklist_entry {
+ __le64 start;
+ __le64 end;
+};
+
+struct bch_sb_field_journal_seq_blacklist {
+ struct bch_sb_field field;
+
+ struct journal_seq_blacklist_entry start[0];
+ __u64 _data[];
+};
+
+/* Superblock: */
+
+/*
+ * New versioning scheme:
+ * One common version number for all on disk data structures - superblock, btree
+ * nodes, journal entries
+ */
+#define BCH_VERSION_MAJOR(_v) ((__u16) ((_v) >> 10))
+#define BCH_VERSION_MINOR(_v) ((__u16) ((_v) & ~(~0U << 10)))
+#define BCH_VERSION(_major, _minor) (((_major) << 10)|(_minor) << 0)
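+
+/*
+ * Worked example: BCH_VERSION(1, 2) == (1 << 10) | 2 == 1026, and
+ * BCH_VERSION_MAJOR(1026) == 1, BCH_VERSION_MINOR(1026) == 2.
+ */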
+
+#define RECOVERY_PASS_ALL_FSCK (1ULL << 63)
+
+#define BCH_METADATA_VERSIONS() \
+ x(bkey_renumber, BCH_VERSION(0, 10), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(inode_btree_change, BCH_VERSION(0, 11), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(snapshot, BCH_VERSION(0, 12), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(inode_backpointers, BCH_VERSION(0, 13), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(btree_ptr_sectors_written, BCH_VERSION(0, 14), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(snapshot_2, BCH_VERSION(0, 15), \
+ BIT_ULL(BCH_RECOVERY_PASS_fs_upgrade_for_subvolumes)| \
+ BIT_ULL(BCH_RECOVERY_PASS_initialize_subvolumes)| \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(reflink_p_fix, BCH_VERSION(0, 16), \
+ BIT_ULL(BCH_RECOVERY_PASS_fix_reflink_p)) \
+ x(subvol_dirent, BCH_VERSION(0, 17), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(inode_v2, BCH_VERSION(0, 18), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(freespace, BCH_VERSION(0, 19), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(alloc_v4, BCH_VERSION(0, 20), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(new_data_types, BCH_VERSION(0, 21), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(backpointers, BCH_VERSION(0, 22), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(inode_v3, BCH_VERSION(0, 23), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(unwritten_extents, BCH_VERSION(0, 24), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(bucket_gens, BCH_VERSION(0, 25), \
+ BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)| \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(lru_v2, BCH_VERSION(0, 26), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(fragmentation_lru, BCH_VERSION(0, 27), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(no_bps_in_alloc_keys, BCH_VERSION(0, 28), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(snapshot_trees, BCH_VERSION(0, 29), \
+ RECOVERY_PASS_ALL_FSCK) \
+ x(major_minor, BCH_VERSION(1, 0), \
+ 0) \
+ x(snapshot_skiplists, BCH_VERSION(1, 1), \
+ BIT_ULL(BCH_RECOVERY_PASS_check_snapshots)) \
+ x(deleted_inodes, BCH_VERSION(1, 2), \
+ BIT_ULL(BCH_RECOVERY_PASS_check_inodes))
+
+enum bcachefs_metadata_version {
+ bcachefs_metadata_version_min = 9,
+#define x(t, n, upgrade_passes) bcachefs_metadata_version_##t = n,
+ BCH_METADATA_VERSIONS()
+#undef x
+ bcachefs_metadata_version_max
+};
+
+static const __maybe_unused
+unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
+
+#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
+
+#define BCH_SB_SECTOR 8
+#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
+
+struct bch_sb_layout {
+ __uuid_t magic; /* bcachefs superblock UUID */
+ __u8 layout_type;
+ __u8 sb_max_size_bits; /* log2 of max sb size, in 512 byte sectors */
+ __u8 nr_superblocks;
+ __u8 pad[5];
+ __le64 sb_offset[61];
+} __packed __aligned(8);
+
+#define BCH_SB_LAYOUT_SECTOR 7
+
+/*
+ * @offset - sector where this sb was written
+ * @version - on disk format version
+ * @version_min - Oldest metadata version this filesystem contains, so we
+ * know when compatibility code can safely be dropped, and can
+ * refuse to mount filesystems we'd need it for
+ * @magic - identifies as a bcachefs superblock (BCHFS_MAGIC)
+ * @seq - identifies the most recent superblock, incremented each time
+ * the superblock is written
+ * @uuid - used for generating various magic numbers and identifying
+ * member devices, never changes
+ * @user_uuid - user visible UUID, may be changed
+ * @label - filesystem label
+ * @features - enabled incompatible features
+ */
+struct bch_sb {
+ struct bch_csum csum;
+ __le16 version;
+ __le16 version_min;
+ __le16 pad[2];
+ __uuid_t magic;
+ __uuid_t uuid;
+ __uuid_t user_uuid;
+ __u8 label[BCH_SB_LABEL_SIZE];
+ __le64 offset;
+ __le64 seq;
+
+ __le16 block_size;
+ __u8 dev_idx;
+ __u8 nr_devices;
+ __le32 u64s;
+
+ __le64 time_base_lo;
+ __le32 time_base_hi;
+ __le32 time_precision;
+
+ __le64 flags[8];
+ __le64 features[2];
+ __le64 compat[2];
+
+ struct bch_sb_layout layout;
+
+ struct bch_sb_field start[0];
+ __le64 _data[];
+} __packed __aligned(8);
+
+/*
+ * Flags:
+ * BCH_SB_INITIALIZED - set on first mount
+ * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
+ * behaviour of mount/recovery path:
+ * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
+ * BCH_SB_128_BIT_MACS - 128 bit macs instead of 80
+ * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
+ * DATA/META_CSUM_TYPE. Also indicates encryption
+ * algorithm in use, if/when we get more than one
+ */
+
+LE16_BITMASK(BCH_SB_BLOCK_SIZE, struct bch_sb, block_size, 0, 16);
+
+LE64_BITMASK(BCH_SB_INITIALIZED, struct bch_sb, flags[0], 0, 1);
+LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
+LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8);
+LE64_BITMASK(BCH_SB_ERROR_ACTION, struct bch_sb, flags[0], 8, 12);
+
+LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE, struct bch_sb, flags[0], 12, 28);
+
+LE64_BITMASK(BCH_SB_GC_RESERVE, struct bch_sb, flags[0], 28, 33);
+LE64_BITMASK(BCH_SB_ROOT_RESERVE, struct bch_sb, flags[0], 33, 40);
+
+LE64_BITMASK(BCH_SB_META_CSUM_TYPE, struct bch_sb, flags[0], 40, 44);
+LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE, struct bch_sb, flags[0], 44, 48);
+
+LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
+LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
+
+LE64_BITMASK(BCH_SB_POSIX_ACL, struct bch_sb, flags[0], 56, 57);
+LE64_BITMASK(BCH_SB_USRQUOTA, struct bch_sb, flags[0], 57, 58);
+LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59);
+LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60);
+
+LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
+LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
+
+LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
+
+LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
+LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);
+LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9);
+
+LE64_BITMASK(BCH_SB_128_BIT_MACS, struct bch_sb, flags[1], 9, 10);
+LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE, struct bch_sb, flags[1], 10, 14);
+
+/*
+ * Max size of an extent that may require bouncing to read or write
+ * (checksummed, compressed): 64k
+ */
+LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
+ struct bch_sb, flags[1], 14, 20);
+
+LE64_BITMASK(BCH_SB_META_REPLICAS_REQ, struct bch_sb, flags[1], 20, 24);
+LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ, struct bch_sb, flags[1], 24, 28);
+
+LE64_BITMASK(BCH_SB_PROMOTE_TARGET, struct bch_sb, flags[1], 28, 40);
+LE64_BITMASK(BCH_SB_FOREGROUND_TARGET, struct bch_sb, flags[1], 40, 52);
+LE64_BITMASK(BCH_SB_BACKGROUND_TARGET, struct bch_sb, flags[1], 52, 64);
+
+LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
+ struct bch_sb, flags[2], 0, 4);
+LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64);
+
+LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
+LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
+LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
+LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
+LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
+LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
+LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
+LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
+LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
+LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
+LE64_BITMASK(BCH_SB_VERSION_UPGRADE, struct bch_sb, flags[4], 54, 56);
+
+LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
+LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
+ struct bch_sb, flags[4], 60, 64);
+
+LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
+ struct bch_sb, flags[5], 0, 16);
+
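+/*
+ * A rough sketch of what LE64_BITMASK() (defined earlier in this header)
+ * generates - for LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2)
+ * the expansion is approximately:
+ *
+ *	static inline __u64 BCH_SB_CLEAN(const struct bch_sb *k)
+ *	{
+ *		return (__le64_to_cpu(k->flags[0]) >> 1) & ~(~0ULL << (2 - 1));
+ *	}
+ *
+ *	static inline void SET_BCH_SB_CLEAN(struct bch_sb *k, __u64 v)
+ *	{
+ *		__u64 new = __le64_to_cpu(k->flags[0]);
+ *
+ *		new &= ~(~(~0ULL << (2 - 1)) << 1);
+ *		new |= (v & ~(~0ULL << (2 - 1))) << 1;
+ *		k->flags[0] = __cpu_to_le64(new);
+ *	}
+ *
+ * i.e. each LE64_BITMASK() line above defines a getter and a SET_ setter for
+ * a [start, end) bit range of a little-endian superblock field.
+ */
+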
+static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
+{
+ return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
+}
+
+static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
+{
+ SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
+ SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
+}
+
+static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
+{
+ return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
+ (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
+}
+
+static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
+{
+ SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
+ SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
+}
+
+/*
+ * Features:
+ *
+ * journal_seq_blacklist_v3: gates BCH_SB_FIELD_journal_seq_blacklist
+ * reflink: gates KEY_TYPE_reflink
+ * inline_data: gates KEY_TYPE_inline_data
+ * new_siphash: gates BCH_STR_HASH_siphash
+ * new_extent_overwrite: gates BTREE_NODE_NEW_EXTENT_OVERWRITE
+ */
+#define BCH_SB_FEATURES() \
+ x(lz4, 0) \
+ x(gzip, 1) \
+ x(zstd, 2) \
+ x(atomic_nlink, 3) \
+ x(ec, 4) \
+ x(journal_seq_blacklist_v3, 5) \
+ x(reflink, 6) \
+ x(new_siphash, 7) \
+ x(inline_data, 8) \
+ x(new_extent_overwrite, 9) \
+ x(incompressible, 10) \
+ x(btree_ptr_v2, 11) \
+ x(extents_above_btree_updates, 12) \
+ x(btree_updates_journalled, 13) \
+ x(reflink_inline_data, 14) \
+ x(new_varint, 15) \
+ x(journal_no_flush, 16) \
+ x(alloc_v2, 17) \
+ x(extents_across_btree_nodes, 18)
+
+#define BCH_SB_FEATURES_ALWAYS \
+ ((1ULL << BCH_FEATURE_new_extent_overwrite)| \
+ (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
+ (1ULL << BCH_FEATURE_btree_updates_journalled)|\
+ (1ULL << BCH_FEATURE_alloc_v2)|\
+ (1ULL << BCH_FEATURE_extents_across_btree_nodes))
+
+#define BCH_SB_FEATURES_ALL \
+ (BCH_SB_FEATURES_ALWAYS| \
+ (1ULL << BCH_FEATURE_new_siphash)| \
+ (1ULL << BCH_FEATURE_btree_ptr_v2)| \
+ (1ULL << BCH_FEATURE_new_varint)| \
+ (1ULL << BCH_FEATURE_journal_no_flush))
+
+enum bch_sb_feature {
+#define x(f, n) BCH_FEATURE_##f,
+ BCH_SB_FEATURES()
+#undef x
+ BCH_FEATURE_NR,
+};
+
+#define BCH_SB_COMPAT() \
+ x(alloc_info, 0) \
+ x(alloc_metadata, 1) \
+ x(extents_above_btree_updates_done, 2) \
+ x(bformat_overflow_done, 3)
+
+enum bch_sb_compat {
+#define x(f, n) BCH_COMPAT_##f,
+ BCH_SB_COMPAT()
+#undef x
+ BCH_COMPAT_NR,
+};
+
+/* options: */
+
+#define BCH_VERSION_UPGRADE_OPTS() \
+ x(compatible, 0) \
+ x(incompatible, 1) \
+ x(none, 2)
+
+enum bch_version_upgrade_opts {
+#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
+ BCH_VERSION_UPGRADE_OPTS()
+#undef x
+};
+
+#define BCH_REPLICAS_MAX 4U
+
+#define BCH_BKEY_PTRS_MAX 16U
+
+#define BCH_ERROR_ACTIONS() \
+ x(continue, 0) \
+ x(ro, 1) \
+ x(panic, 2)
+
+enum bch_error_actions {
+#define x(t, n) BCH_ON_ERROR_##t = n,
+ BCH_ERROR_ACTIONS()
+#undef x
+ BCH_ON_ERROR_NR
+};
+
+#define BCH_STR_HASH_TYPES() \
+ x(crc32c, 0) \
+ x(crc64, 1) \
+ x(siphash_old, 2) \
+ x(siphash, 3)
+
+enum bch_str_hash_type {
+#define x(t, n) BCH_STR_HASH_##t = n,
+ BCH_STR_HASH_TYPES()
+#undef x
+ BCH_STR_HASH_NR
+};
+
+#define BCH_STR_HASH_OPTS() \
+ x(crc32c, 0) \
+ x(crc64, 1) \
+ x(siphash, 2)
+
+enum bch_str_hash_opts {
+#define x(t, n) BCH_STR_HASH_OPT_##t = n,
+ BCH_STR_HASH_OPTS()
+#undef x
+ BCH_STR_HASH_OPT_NR
+};
+
+#define BCH_CSUM_TYPES() \
+ x(none, 0) \
+ x(crc32c_nonzero, 1) \
+ x(crc64_nonzero, 2) \
+ x(chacha20_poly1305_80, 3) \
+ x(chacha20_poly1305_128, 4) \
+ x(crc32c, 5) \
+ x(crc64, 6) \
+ x(xxhash, 7)
+
+enum bch_csum_type {
+#define x(t, n) BCH_CSUM_##t = n,
+ BCH_CSUM_TYPES()
+#undef x
+ BCH_CSUM_NR
+};
+
+static const __maybe_unused unsigned bch_crc_bytes[] = {
+ [BCH_CSUM_none] = 0,
+ [BCH_CSUM_crc32c_nonzero] = 4,
+ [BCH_CSUM_crc32c] = 4,
+ [BCH_CSUM_crc64_nonzero] = 8,
+ [BCH_CSUM_crc64] = 8,
+ [BCH_CSUM_xxhash] = 8,
+ [BCH_CSUM_chacha20_poly1305_80] = 10,
+ [BCH_CSUM_chacha20_poly1305_128] = 16,
+};
+
+static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
+{
+ switch (type) {
+ case BCH_CSUM_chacha20_poly1305_80:
+ case BCH_CSUM_chacha20_poly1305_128:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#define BCH_CSUM_OPTS() \
+ x(none, 0) \
+ x(crc32c, 1) \
+ x(crc64, 2) \
+ x(xxhash, 3)
+
+enum bch_csum_opts {
+#define x(t, n) BCH_CSUM_OPT_##t = n,
+ BCH_CSUM_OPTS()
+#undef x
+ BCH_CSUM_OPT_NR
+};
+
+#define BCH_COMPRESSION_TYPES() \
+ x(none, 0) \
+ x(lz4_old, 1) \
+ x(gzip, 2) \
+ x(lz4, 3) \
+ x(zstd, 4) \
+ x(incompressible, 5)
+
+enum bch_compression_type {
+#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
+ BCH_COMPRESSION_TYPES()
+#undef x
+ BCH_COMPRESSION_TYPE_NR
+};
+
+#define BCH_COMPRESSION_OPTS() \
+ x(none, 0) \
+ x(lz4, 1) \
+ x(gzip, 2) \
+ x(zstd, 3)
+
+enum bch_compression_opts {
+#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
+ BCH_COMPRESSION_OPTS()
+#undef x
+ BCH_COMPRESSION_OPT_NR
+};
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define BCACHE_MAGIC \
+ UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca, \
+ 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
+#define BCHFS_MAGIC \
+ UUID_INIT(0xc68573f6, 0x66ce, 0x90a9, \
+ 0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
+
+#define BCACHEFS_STATFS_MAGIC 0xca451a4e
+
+#define JSET_MAGIC __cpu_to_le64(0x245235c1a3625032ULL)
+#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
+
+static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
+{
+ __le64 ret;
+
+ memcpy(&ret, &sb->uuid, sizeof(ret));
+ return ret;
+}
+
+static inline __u64 __jset_magic(struct bch_sb *sb)
+{
+ return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
+}
+
+static inline __u64 __bset_magic(struct bch_sb *sb)
+{
+ return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
+}
+
+/* Journal */
+
+#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
+
+#define BCH_JSET_ENTRY_TYPES() \
+ x(btree_keys, 0) \
+ x(btree_root, 1) \
+ x(prio_ptrs, 2) \
+ x(blacklist, 3) \
+ x(blacklist_v2, 4) \
+ x(usage, 5) \
+ x(data_usage, 6) \
+ x(clock, 7) \
+ x(dev_usage, 8) \
+ x(log, 9) \
+ x(overwrite, 10)
+
+enum {
+#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
+ BCH_JSET_ENTRY_TYPES()
+#undef x
+ BCH_JSET_ENTRY_NR
+};
+
+/*
+ * Journal sequence numbers can be blacklisted: bsets record the max sequence
+ * number of all the journal entries they contain updates for, so that on
+ * recovery we can ignore those bsets that contain index updates newer than what
+ * made it into the journal.
+ *
+ * This means that we can't reuse that journal_seq - we have to skip it, and
+ * then record that we skipped it so that the next time we crash and recover we
+ * don't think there was a missing journal entry.
+ */
+struct jset_entry_blacklist {
+ struct jset_entry entry;
+ __le64 seq;
+};
+
+struct jset_entry_blacklist_v2 {
+ struct jset_entry entry;
+ __le64 start;
+ __le64 end;
+};
+
+#define BCH_FS_USAGE_TYPES() \
+ x(reserved, 0) \
+ x(inodes, 1) \
+ x(key_version, 2)
+
+enum {
+#define x(f, nr) BCH_FS_USAGE_##f = nr,
+ BCH_FS_USAGE_TYPES()
+#undef x
+ BCH_FS_USAGE_NR
+};
+
+struct jset_entry_usage {
+ struct jset_entry entry;
+ __le64 v;
+} __packed;
+
+struct jset_entry_data_usage {
+ struct jset_entry entry;
+ __le64 v;
+ struct bch_replicas_entry r;
+} __packed;
+
+struct jset_entry_clock {
+ struct jset_entry entry;
+ __u8 rw;
+ __u8 pad[7];
+ __le64 time;
+} __packed;
+
+struct jset_entry_dev_usage_type {
+ __le64 buckets;
+ __le64 sectors;
+ __le64 fragmented;
+} __packed;
+
+struct jset_entry_dev_usage {
+ struct jset_entry entry;
+ __le32 dev;
+ __u32 pad;
+
+ __le64 buckets_ec;
+ __le64 _buckets_unavailable; /* No longer used */
+
+ struct jset_entry_dev_usage_type d[];
+};
+
+static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
+{
+ return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
+ sizeof(struct jset_entry_dev_usage_type);
+}
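+
+/*
+ * Illustrative: assuming the usual 8 byte struct jset_entry header, an entry
+ * with entry.u64s == 12 spans 8 + 12 * 8 = 104 bytes; subtracting the 32 byte
+ * fixed part of struct jset_entry_dev_usage leaves 72 bytes, i.e. three
+ * 24 byte jset_entry_dev_usage_type entries.
+ */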
+
+struct jset_entry_log {
+ struct jset_entry entry;
+ u8 d[];
+} __packed;
+
+/*
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the oldest journal entry that still has keys the btree hasn't
+ * flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+struct jset {
+ struct bch_csum csum;
+
+ __le64 magic;
+ __le64 seq;
+ __le32 version;
+ __le32 flags;
+
+ __le32 u64s; /* size of _data[] in u64s */
+
+ __u8 encrypted_start[0];
+
+ __le16 _read_clock; /* no longer used */
+ __le16 _write_clock;
+
+ /* Sequence number of oldest dirty journal entry */
+ __le64 last_seq;
+
+ struct jset_entry start[0];
+ __u64 _data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
+LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
+LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
+
+#define BCH_JOURNAL_BUCKETS_MIN 8
+
+/* Btree: */
+
+enum btree_id_flags {
+ BTREE_ID_EXTENTS = BIT(0),
+ BTREE_ID_SNAPSHOTS = BIT(1),
+ BTREE_ID_DATA = BIT(2),
+};
+
+#define BCH_BTREE_IDS() \
+ x(extents, 0, BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
+ BIT_ULL(KEY_TYPE_whiteout)| \
+ BIT_ULL(KEY_TYPE_error)| \
+ BIT_ULL(KEY_TYPE_cookie)| \
+ BIT_ULL(KEY_TYPE_extent)| \
+ BIT_ULL(KEY_TYPE_reservation)| \
+ BIT_ULL(KEY_TYPE_reflink_p)| \
+ BIT_ULL(KEY_TYPE_inline_data)) \
+ x(inodes, 1, BTREE_ID_SNAPSHOTS, \
+ BIT_ULL(KEY_TYPE_whiteout)| \
+ BIT_ULL(KEY_TYPE_inode)| \
+ BIT_ULL(KEY_TYPE_inode_v2)| \
+ BIT_ULL(KEY_TYPE_inode_v3)| \
+ BIT_ULL(KEY_TYPE_inode_generation)) \
+ x(dirents, 2, BTREE_ID_SNAPSHOTS, \
+ BIT_ULL(KEY_TYPE_whiteout)| \
+ BIT_ULL(KEY_TYPE_hash_whiteout)| \
+ BIT_ULL(KEY_TYPE_dirent)) \
+ x(xattrs, 3, BTREE_ID_SNAPSHOTS, \
+ BIT_ULL(KEY_TYPE_whiteout)| \
+ BIT_ULL(KEY_TYPE_cookie)| \
+ BIT_ULL(KEY_TYPE_hash_whiteout)| \
+ BIT_ULL(KEY_TYPE_xattr)) \
+ x(alloc, 4, 0, \
+ BIT_ULL(KEY_TYPE_alloc)| \
+ BIT_ULL(KEY_TYPE_alloc_v2)| \
+ BIT_ULL(KEY_TYPE_alloc_v3)| \
+ BIT_ULL(KEY_TYPE_alloc_v4)) \
+ x(quotas, 5, 0, \
+ BIT_ULL(KEY_TYPE_quota)) \
+ x(stripes, 6, 0, \
+ BIT_ULL(KEY_TYPE_stripe)) \
+ x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \
+ BIT_ULL(KEY_TYPE_reflink_v)| \
+ BIT_ULL(KEY_TYPE_indirect_inline_data)) \
+ x(subvolumes, 8, 0, \
+ BIT_ULL(KEY_TYPE_subvolume)) \
+ x(snapshots, 9, 0, \
+ BIT_ULL(KEY_TYPE_snapshot)) \
+ x(lru, 10, 0, \
+ BIT_ULL(KEY_TYPE_set)) \
+ x(freespace, 11, BTREE_ID_EXTENTS, \
+ BIT_ULL(KEY_TYPE_set)) \
+ x(need_discard, 12, 0, \
+ BIT_ULL(KEY_TYPE_set)) \
+ x(backpointers, 13, 0, \
+ BIT_ULL(KEY_TYPE_backpointer)) \
+ x(bucket_gens, 14, 0, \
+ BIT_ULL(KEY_TYPE_bucket_gens)) \
+ x(snapshot_trees, 15, 0, \
+ BIT_ULL(KEY_TYPE_snapshot_tree)) \
+ x(deleted_inodes, 16, BTREE_ID_SNAPSHOTS, \
+ BIT_ULL(KEY_TYPE_set)) \
+ x(logged_ops, 17, 0, \
+ BIT_ULL(KEY_TYPE_logged_op_truncate)| \
+ BIT_ULL(KEY_TYPE_logged_op_finsert))
+
+enum btree_id {
+#define x(name, nr, ...) BTREE_ID_##name = nr,
+ BCH_BTREE_IDS()
+#undef x
+ BTREE_ID_NR
+};
+
+#define BTREE_MAX_DEPTH 4U
+
+/* Btree nodes */
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+ __le64 seq;
+
+ /*
+ * Highest journal entry this bset contains keys for.
+ * If on recovery we don't see that journal entry, this bset is ignored:
+ * this allows us to preserve the order of all index updates after a
+ * crash, since the journal records a total order of all index updates
+ * and anything that didn't make it to the journal doesn't get used.
+ */
+ __le64 journal_seq;
+
+ __le32 flags;
+ __le16 version;
+ __le16 u64s; /* count of _data[] in u64s */
+
+ struct bkey_packed start[0];
+ __u64 _data[];
+} __packed __aligned(8);
+
+LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
+
+LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5);
+LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
+ struct bset, flags, 5, 6);
+
+/* Sector offset within the btree node: */
+LE32_BITMASK(BSET_OFFSET, struct bset, flags, 16, 32);
+
+struct btree_node {
+ struct bch_csum csum;
+ __le64 magic;
+
+ /* this flags field is encrypted, unlike bset->flags: */
+ __le64 flags;
+
+ /* Closed interval: */
+ struct bpos min_key;
+ struct bpos max_key;
+ struct bch_extent_ptr _ptr; /* not used anymore */
+ struct bkey_format format;
+
+ union {
+ struct bset keys;
+ struct {
+ __u8 pad[22];
+ __le16 u64s;
+ __u64 _data[0];
+ };
+ };
+} __packed __aligned(8);
+
+LE64_BITMASK(BTREE_NODE_ID_LO, struct btree_node, flags, 0, 4);
+LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
+LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
+ struct btree_node, flags, 8, 9);
+LE64_BITMASK(BTREE_NODE_ID_HI, struct btree_node, flags, 9, 25);
+/* 25-32 unused */
+LE64_BITMASK(BTREE_NODE_SEQ, struct btree_node, flags, 32, 64);
+
+static inline __u64 BTREE_NODE_ID(struct btree_node *n)
+{
+ return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
+}
+
+static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
+{
+ SET_BTREE_NODE_ID_LO(n, v);
+ SET_BTREE_NODE_ID_HI(n, v >> 4);
+}
+
+struct btree_node_entry {
+ struct bch_csum csum;
+
+ union {
+ struct bset keys;
+ struct {
+ __u8 pad[22];
+ __le16 u64s;
+ __u64 _data[0];
+ };
+ };
+} __packed __aligned(8);
+
+#endif /* _BCACHEFS_FORMAT_H */
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
new file mode 100644
index 000000000000..f05881f7e113
--- /dev/null
+++ b/fs/bcachefs/bcachefs_ioctl.h
@@ -0,0 +1,368 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_IOCTL_H
+#define _BCACHEFS_IOCTL_H
+
+#include <linux/uuid.h>
+#include <asm/ioctl.h>
+#include "bcachefs_format.h"
+
+/*
+ * Flags common to multiple ioctls:
+ */
+#define BCH_FORCE_IF_DATA_LOST (1 << 0)
+#define BCH_FORCE_IF_METADATA_LOST (1 << 1)
+#define BCH_FORCE_IF_DATA_DEGRADED (1 << 2)
+#define BCH_FORCE_IF_METADATA_DEGRADED (1 << 3)
+
+#define BCH_FORCE_IF_LOST \
+ (BCH_FORCE_IF_DATA_LOST| \
+ BCH_FORCE_IF_METADATA_LOST)
+#define BCH_FORCE_IF_DEGRADED \
+ (BCH_FORCE_IF_DATA_DEGRADED| \
+ BCH_FORCE_IF_METADATA_DEGRADED)
+
+/*
+ * If cleared, ioctls that refer to a device pass it as a pointer to a pathname
+ * (e.g. /dev/sda1); if set, the dev field is the device's index within the
+ * filesystem:
+ */
+#define BCH_BY_INDEX (1 << 4)
+
+/*
+ * For BCH_IOCTL_READ_SUPER: get superblock of a specific device, not filesystem
+ * wide superblock:
+ */
+#define BCH_READ_DEV (1 << 5)
+
+/* global control dev: */
+
+/* These are currently broken, and probably unnecessary: */
+#if 0
+#define BCH_IOCTL_ASSEMBLE _IOW(0xbc, 1, struct bch_ioctl_assemble)
+#define BCH_IOCTL_INCREMENTAL _IOW(0xbc, 2, struct bch_ioctl_incremental)
+
+struct bch_ioctl_assemble {
+ __u32 flags;
+ __u32 nr_devs;
+ __u64 pad;
+ __u64 devs[];
+};
+
+struct bch_ioctl_incremental {
+ __u32 flags;
+ __u64 pad;
+ __u64 dev;
+};
+#endif
+
+/* filesystem ioctls: */
+
+#define BCH_IOCTL_QUERY_UUID _IOR(0xbc, 1, struct bch_ioctl_query_uuid)
+
+/* These only make sense when we also have incremental assembly */
+#if 0
+#define BCH_IOCTL_START _IOW(0xbc, 2, struct bch_ioctl_start)
+#define BCH_IOCTL_STOP _IO(0xbc, 3)
+#endif
+
+#define BCH_IOCTL_DISK_ADD _IOW(0xbc, 4, struct bch_ioctl_disk)
+#define BCH_IOCTL_DISK_REMOVE _IOW(0xbc, 5, struct bch_ioctl_disk)
+#define BCH_IOCTL_DISK_ONLINE _IOW(0xbc, 6, struct bch_ioctl_disk)
+#define BCH_IOCTL_DISK_OFFLINE _IOW(0xbc, 7, struct bch_ioctl_disk)
+#define BCH_IOCTL_DISK_SET_STATE _IOW(0xbc, 8, struct bch_ioctl_disk_set_state)
+#define BCH_IOCTL_DATA _IOW(0xbc, 10, struct bch_ioctl_data)
+#define BCH_IOCTL_FS_USAGE _IOWR(0xbc, 11, struct bch_ioctl_fs_usage)
+#define BCH_IOCTL_DEV_USAGE _IOWR(0xbc, 11, struct bch_ioctl_dev_usage)
+#define BCH_IOCTL_READ_SUPER _IOW(0xbc, 12, struct bch_ioctl_read_super)
+#define BCH_IOCTL_DISK_GET_IDX _IOW(0xbc, 13, struct bch_ioctl_disk_get_idx)
+#define BCH_IOCTL_DISK_RESIZE _IOW(0xbc, 14, struct bch_ioctl_disk_resize)
+#define BCH_IOCTL_DISK_RESIZE_JOURNAL _IOW(0xbc, 15, struct bch_ioctl_disk_resize_journal)
+
+#define BCH_IOCTL_SUBVOLUME_CREATE _IOW(0xbc, 16, struct bch_ioctl_subvolume)
+#define BCH_IOCTL_SUBVOLUME_DESTROY _IOW(0xbc, 17, struct bch_ioctl_subvolume)
+
+/* ioctls below act on a particular file, not the filesystem as a whole: */
+
+#define BCHFS_IOC_REINHERIT_ATTRS _IOR(0xbc, 64, const char __user *)
+
+/*
+ * BCH_IOCTL_QUERY_UUID: get filesystem UUID
+ *
+ * Returns user visible UUID, not internal UUID (which may not ever be changed);
+ * the filesystem's sysfs directory may be found under /sys/fs/bcachefs with
+ * this UUID.
+ */
+struct bch_ioctl_query_uuid {
+ __uuid_t uuid;
+};
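+
+/*
+ * Illustrative userspace usage - a sketch, assuming @fd refers to a file on a
+ * mounted bcachefs filesystem (error handling elided):
+ *
+ *	struct bch_ioctl_query_uuid u;
+ *
+ *	ioctl(fd, BCH_IOCTL_QUERY_UUID, &u);
+ *
+ * u.uuid then names this filesystem's directory under /sys/fs/bcachefs/.
+ */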
+
+#if 0
+struct bch_ioctl_start {
+ __u32 flags;
+ __u32 pad;
+};
+#endif
+
+/*
+ * BCH_IOCTL_DISK_ADD: add a new device to an existing filesystem
+ *
+ * The specified device must not be open or in use. On success, the new device
+ * will be an online member of the filesystem just like any other member.
+ *
+ * The device must first be prepared by userspace by formatting with a bcachefs
+ * superblock, which is only used for passing in superblock options/parameters
+ * for that device (in struct bch_member). The new device's superblock should
+ * not claim to be a member of any existing filesystem - UUIDs on it will be
+ * ignored.
+ */
+
+/*
+ * BCH_IOCTL_DISK_REMOVE: permanently remove a member device from a filesystem
+ *
+ * Any data present on @dev will be permanently deleted, and @dev will be
+ * removed from its slot in the filesystem's list of member devices. The device
+ * may be either online or offline.
+ *
+ * Will fail if removing @dev would leave us with insufficient read/write
+ * devices or degraded/unavailable data, unless the appropriate
+ * BCH_FORCE_IF_* flags are set.
+ */
+
+/*
+ * BCH_IOCTL_DISK_ONLINE: given a disk that is already a member of a filesystem
+ * but is not open (e.g. because we started in degraded mode), bring it online
+ *
+ * all existing data on @dev will be available once the device is online,
+ * exactly as if @dev was present when the filesystem was first mounted
+ */
+
+/*
+ * BCH_IOCTL_DISK_OFFLINE: offline a disk, causing the kernel to close that
+ * block device, without removing it from the filesystem (so it can be brought
+ * back online later)
+ *
+ * Data present on @dev will be unavailable while @dev is offline (unless
+ * replicated), but will still be intact and untouched if @dev is brought back
+ * online
+ *
+ * Will fail (similarly to BCH_IOCTL_DISK_SET_STATE) if offlining @dev would
+ * leave us with insufficient read/write devices or degraded/unavailable data,
+ * unless the appropriate BCH_FORCE_IF_* flags are set.
+ */
+
+struct bch_ioctl_disk {
+ __u32 flags;
+ __u32 pad;
+ __u64 dev;
+};
+
+/*
+ * BCH_IOCTL_DISK_SET_STATE: modify state of a member device of a filesystem
+ *
+ * @new_state - one of the bch_member_state states (rw, ro, failed,
+ * spare)
+ *
+ * Will refuse to change member state if we would then have insufficient devices
+ * to write to, or if it would result in degraded data (when @new_state is
+ * failed or spare) unless the appropriate BCH_FORCE_IF_* flags are set.
+ */
+struct bch_ioctl_disk_set_state {
+ __u32 flags;
+ __u8 new_state;
+ __u8 pad[3];
+ __u64 dev;
+};
+
+enum bch_data_ops {
+ BCH_DATA_OP_SCRUB = 0,
+ BCH_DATA_OP_REREPLICATE = 1,
+ BCH_DATA_OP_MIGRATE = 2,
+ BCH_DATA_OP_REWRITE_OLD_NODES = 3,
+ BCH_DATA_OP_NR = 4,
+};
+
+/*
+ * BCH_IOCTL_DATA: operations that walk and manipulate filesystem data (e.g.
+ * scrub, rereplicate, migrate).
+ *
+ * This ioctl kicks off a job in the background, and returns a file descriptor.
+ * Reading from the file descriptor returns a struct bch_ioctl_data_event,
+ * indicating current progress, and closing the file descriptor will stop the
+ * job. The file descriptor is O_CLOEXEC.
+ */
+struct bch_ioctl_data {
+ __u16 op;
+ __u8 start_btree;
+ __u8 end_btree;
+ __u32 flags;
+
+ struct bpos start_pos;
+ struct bpos end_pos;
+
+ union {
+ struct {
+ __u32 dev;
+ __u32 pad;
+ } migrate;
+ struct {
+ __u64 pad[8];
+ };
+ };
+} __packed __aligned(8);
+
+enum bch_data_event {
+ BCH_DATA_EVENT_PROGRESS = 0,
+ /* XXX: add an event for reporting errors */
+ BCH_DATA_EVENT_NR = 1,
+};
+
+struct bch_ioctl_data_progress {
+ __u8 data_type;
+ __u8 btree_id;
+ __u8 pad[2];
+ struct bpos pos;
+
+ __u64 sectors_done;
+ __u64 sectors_total;
+} __packed __aligned(8);
+
+struct bch_ioctl_data_event {
+ __u8 type;
+ __u8 pad[7];
+ union {
+ struct bch_ioctl_data_progress p;
+ __u64 pad2[15];
+ };
+} __packed __aligned(8);
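+
+/*
+ * Illustrative usage - a sketch, assuming @fs_fd refers to a file on the
+ * filesystem and POS_MIN/POS_MAX from earlier in bcachefs_format.h:
+ *
+ *	struct bch_ioctl_data d = {
+ *		.op		= BCH_DATA_OP_REREPLICATE,
+ *		.end_btree	= BTREE_ID_NR,
+ *		.start_pos	= POS_MIN,
+ *		.end_pos	= POS_MAX,
+ *	};
+ *	struct bch_ioctl_data_event e;
+ *	int job = ioctl(fs_fd, BCH_IOCTL_DATA, &d);
+ *
+ *	while (job >= 0 && read(job, &e, sizeof(e)) == sizeof(e))
+ *		if (e.type == BCH_DATA_EVENT_PROGRESS)
+ *			printf("%llu/%llu sectors\n",
+ *			       (unsigned long long) e.p.sectors_done,
+ *			       (unsigned long long) e.p.sectors_total);
+ */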
+
+struct bch_replicas_usage {
+ __u64 sectors;
+ struct bch_replicas_entry r;
+} __packed;
+
+static inline struct bch_replicas_usage *
+replicas_usage_next(struct bch_replicas_usage *u)
+{
+ return (void *) u + replicas_entry_bytes(&u->r) + 8;
+}
+
+/*
+ * BCH_IOCTL_FS_USAGE: query filesystem disk space usage
+ *
+ * Returns disk space usage broken out by data type, number of replicas, and
+ * by component device
+ *
+ * @replica_entries_bytes - size, in bytes, allocated for replica usage entries
+ *
+ * On success, @replica_entries_bytes will be changed to indicate the number of
+ * bytes actually used.
+ *
+ * Returns -ERANGE if @replica_entries_bytes was too small
+ */
+struct bch_ioctl_fs_usage {
+ __u64 capacity;
+ __u64 used;
+ __u64 online_reserved;
+ __u64 persistent_reserved[BCH_REPLICAS_MAX];
+
+ __u32 replica_entries_bytes;
+ __u32 pad;
+
+ struct bch_replicas_usage replicas[0];
+};
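+
+/*
+ * Illustrative iteration over the variable length replica entries - a sketch,
+ * assuming @u points to a successfully filled bch_ioctl_fs_usage (process()
+ * is a hypothetical consumer):
+ *
+ *	struct bch_replicas_usage *r = u->replicas;
+ *	struct bch_replicas_usage *end =
+ *		(void *) u->replicas + u->replica_entries_bytes;
+ *
+ *	for (; r < end; r = replicas_usage_next(r))
+ *		process(r->r.data_type, r->sectors);
+ */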
+
+/*
+ * BCH_IOCTL_DEV_USAGE: query device disk space usage
+ *
+ * Returns disk space usage broken out by data type - both by buckets and
+ * sectors.
+ */
+struct bch_ioctl_dev_usage {
+ __u64 dev;
+ __u32 flags;
+ __u8 state;
+ __u8 pad[7];
+
+ __u32 bucket_size;
+ __u64 nr_buckets;
+
+ __u64 buckets_ec;
+
+ struct bch_ioctl_dev_usage_type {
+ __u64 buckets;
+ __u64 sectors;
+ __u64 fragmented;
+ } d[BCH_DATA_NR];
+};
+
+/*
+ * BCH_IOCTL_READ_SUPER: read filesystem superblock
+ *
+ * Equivalent to reading the superblock directly from the block device, except
+ * avoids racing with the kernel writing the superblock or having to figure out
+ * which block device to read
+ *
+ * @sb - buffer to read into
+ * @size - size of userspace allocated buffer
+ * @dev - device to read superblock for, if BCH_READ_DEV flag is
+ * specified
+ *
+ * Returns -ERANGE if buffer provided is too small
+ */
+struct bch_ioctl_read_super {
+ __u32 flags;
+ __u32 pad;
+ __u64 dev;
+ __u64 size;
+ __u64 sb;
+};
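+
+/*
+ * Illustrative usage - a sketch; on -ERANGE, retry with a bigger buffer:
+ *
+ *	char buf[1 << 20];
+ *	struct bch_ioctl_read_super s = {
+ *		.size	= sizeof(buf),
+ *		.sb	= (__u64) (unsigned long) buf,
+ *	};
+ *
+ *	ioctl(fs_fd, BCH_IOCTL_READ_SUPER, &s);
+ *
+ * on success, buf starts with this filesystem's struct bch_sb.
+ */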
+
+/*
+ * BCH_IOCTL_DISK_GET_IDX: given a path to a block device, query the filesystem
+ * to determine if the disk is an (online) member - if so, returns the device's
+ * index
+ *
+ * Returns -ENOENT if not found
+ */
+struct bch_ioctl_disk_get_idx {
+ __u64 dev;
+};
+
+/*
+ * BCH_IOCTL_DISK_RESIZE: resize filesystem on a device
+ *
+ * @dev - member to resize
+ * @nbuckets - new number of buckets
+ */
+struct bch_ioctl_disk_resize {
+ __u32 flags;
+ __u32 pad;
+ __u64 dev;
+ __u64 nbuckets;
+};
+
+/*
+ * BCH_IOCTL_DISK_RESIZE_JOURNAL: resize journal on a device
+ *
+ * @dev - member to resize
+ * @nbuckets - new number of buckets
+ */
+struct bch_ioctl_disk_resize_journal {
+ __u32 flags;
+ __u32 pad;
+ __u64 dev;
+ __u64 nbuckets;
+};
+
+struct bch_ioctl_subvolume {
+ __u32 flags;
+ __u32 dirfd;
+ __u16 mode;
+ __u16 pad[3];
+ __u64 dst_ptr;
+ __u64 src_ptr;
+};
+
+#define BCH_SUBVOL_SNAPSHOT_CREATE (1U << 0)
+#define BCH_SUBVOL_SNAPSHOT_RO (1U << 1)
+
+#endif /* _BCACHEFS_IOCTL_H */
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
new file mode 100644
index 000000000000..abdb05507d16
--- /dev/null
+++ b/fs/bcachefs/bkey.c
@@ -0,0 +1,1120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey.h"
+#include "bkey_cmp.h"
+#include "bkey_methods.h"
+#include "bset.h"
+#include "util.h"
+
+const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
+
+void bch2_bkey_packed_to_binary_text(struct printbuf *out,
+ const struct bkey_format *f,
+ const struct bkey_packed *k)
+{
+ const u64 *p = high_word(f, k);
+ unsigned word_bits = 64 - high_bit_offset;
+ unsigned nr_key_bits = bkey_format_key_bits(f) + high_bit_offset;
+ u64 v = *p & (~0ULL >> high_bit_offset);
+
+ if (!nr_key_bits) {
+ prt_str(out, "(empty)");
+ return;
+ }
+
+ while (1) {
+ unsigned next_key_bits = nr_key_bits;
+
+ if (nr_key_bits < 64) {
+ v >>= 64 - nr_key_bits;
+ next_key_bits = 0;
+ } else {
+ next_key_bits -= 64;
+ }
+
+ bch2_prt_u64_binary(out, v, min(word_bits, nr_key_bits));
+
+ if (!next_key_bits)
+ break;
+
+ prt_char(out, ' ');
+
+ p = next_word(p);
+ v = *p;
+ word_bits = 64;
+ nr_key_bits = next_key_bits;
+ }
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
+ const struct bkey *unpacked,
+ const struct bkey_format *format)
+{
+ struct bkey tmp;
+
+ BUG_ON(bkeyp_val_u64s(format, packed) !=
+ bkey_val_u64s(unpacked));
+
+ BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
+
+ tmp = __bch2_bkey_unpack_key(format, packed);
+
+ if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_printf(&buf, "keys differ: format u64s %u fields %u %u %u %u %u\n",
+ format->key_u64s,
+ format->bits_per_field[0],
+ format->bits_per_field[1],
+ format->bits_per_field[2],
+ format->bits_per_field[3],
+ format->bits_per_field[4]);
+
+ prt_printf(&buf, "compiled unpack: ");
+ bch2_bkey_to_text(&buf, unpacked);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "c unpack: ");
+ bch2_bkey_to_text(&buf, &tmp);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "compiled unpack: ");
+ bch2_bkey_packed_to_binary_text(&buf, &bch2_bkey_format_current,
+ (struct bkey_packed *) unpacked);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "c unpack: ");
+ bch2_bkey_packed_to_binary_text(&buf, &bch2_bkey_format_current,
+ (struct bkey_packed *) &tmp);
+ prt_newline(&buf);
+
+ panic("%s", buf.buf);
+ }
+}
+
+#else
+static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed,
+ const struct bkey *unpacked,
+ const struct bkey_format *format) {}
+#endif
+
+struct pack_state {
+ const struct bkey_format *format;
+ unsigned bits; /* bits remaining in current word */
+ u64 w; /* current word */
+ u64 *p; /* pointer to next word */
+};
+
+__always_inline
+static struct pack_state pack_state_init(const struct bkey_format *format,
+ struct bkey_packed *k)
+{
+ u64 *p = high_word(format, k);
+
+ return (struct pack_state) {
+ .format = format,
+ .bits = 64 - high_bit_offset,
+ .w = 0,
+ .p = p,
+ };
+}
+
+__always_inline
+static void pack_state_finish(struct pack_state *state,
+ struct bkey_packed *k)
+{
+ EBUG_ON(state->p < k->_data);
+ EBUG_ON(state->p >= (u64 *) k->_data + state->format->key_u64s);
+
+ *state->p = state->w;
+}
+
+struct unpack_state {
+ const struct bkey_format *format;
+ unsigned bits; /* bits remaining in current word */
+ u64 w; /* current word */
+ const u64 *p; /* pointer to next word */
+};
+
+__always_inline
+static struct unpack_state unpack_state_init(const struct bkey_format *format,
+ const struct bkey_packed *k)
+{
+ const u64 *p = high_word(format, k);
+
+ return (struct unpack_state) {
+ .format = format,
+ .bits = 64 - high_bit_offset,
+ .w = *p << high_bit_offset,
+ .p = p,
+ };
+}
+
+__always_inline
+static u64 get_inc_field(struct unpack_state *state, unsigned field)
+{
+ unsigned bits = state->format->bits_per_field[field];
+ u64 v = 0, offset = le64_to_cpu(state->format->field_offset[field]);
+
+ if (bits >= state->bits) {
+ v = state->w >> (64 - bits);
+ bits -= state->bits;
+
+ state->p = next_word(state->p);
+ state->w = *state->p;
+ state->bits = 64;
+ }
+
+ /* avoid shift by 64 if bits is 0 - bits is never 64 here: */
+ v |= (state->w >> 1) >> (63 - bits);
+ state->w <<= bits;
+ state->bits -= bits;
+
+ return v + offset;
+}
+
+__always_inline
+static void __set_inc_field(struct pack_state *state, unsigned field, u64 v)
+{
+ unsigned bits = state->format->bits_per_field[field];
+
+ if (bits) {
+ if (bits > state->bits) {
+ bits -= state->bits;
+ /* avoid shift by 64 if bits is 64 - bits is never 0 here: */
+ state->w |= (v >> 1) >> (bits - 1);
+
+ *state->p = state->w;
+ state->p = next_word(state->p);
+ state->w = 0;
+ state->bits = 64;
+ }
+
+ state->bits -= bits;
+ state->w |= v << state->bits;
+ }
+}
+
+__always_inline
+static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
+{
+ unsigned bits = state->format->bits_per_field[field];
+ u64 offset = le64_to_cpu(state->format->field_offset[field]);
+
+ if (v < offset)
+ return false;
+
+ v -= offset;
+
+ if (fls64(v) > bits)
+ return false;
+
+ __set_inc_field(state, field, v);
+ return true;
+}
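+
+/*
+ * Worked example (illustrative): with bits_per_field[field] == 8 and
+ * field_offset[field] == 100, set_inc_field(state, field, 260) stores
+ * 260 - 100 = 160, which fits in 8 bits; get_inc_field() later returns
+ * 160 + 100 = 260. Values outside [100, 355] can't be packed and
+ * set_inc_field() returns false.
+ */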
+
+/*
+ * Note: does NOT set out->format (we don't know what it should be here!)
+ *
+ * Also: doesn't work on extents - it doesn't preserve the invariant that
+ * if k is packed bkey_start_pos(k) will successfully pack
+ */
+static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
+ struct bkey_packed *out,
+ const struct bkey_format *in_f,
+ const struct bkey_packed *in)
+{
+ struct pack_state out_s = pack_state_init(out_f, out);
+ struct unpack_state in_s = unpack_state_init(in_f, in);
+ u64 *w = out->_data;
+ unsigned i;
+
+ *w = 0;
+
+ for (i = 0; i < BKEY_NR_FIELDS; i++)
+ if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
+ return false;
+
+ /* Can't happen because the val would be too big to unpack: */
+ EBUG_ON(in->u64s - in_f->key_u64s + out_f->key_u64s > U8_MAX);
+
+ pack_state_finish(&out_s, out);
+ out->u64s = out_f->key_u64s + in->u64s - in_f->key_u64s;
+ out->needs_whiteout = in->needs_whiteout;
+ out->type = in->type;
+
+ return true;
+}
+
+bool bch2_bkey_transform(const struct bkey_format *out_f,
+ struct bkey_packed *out,
+ const struct bkey_format *in_f,
+ const struct bkey_packed *in)
+{
+ if (!bch2_bkey_transform_key(out_f, out, in_f, in))
+ return false;
+
+ memcpy_u64s((u64 *) out + out_f->key_u64s,
+ (u64 *) in + in_f->key_u64s,
+ (in->u64s - in_f->key_u64s));
+ return true;
+}
+
+struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
+ const struct bkey_packed *in)
+{
+ struct unpack_state state = unpack_state_init(format, in);
+ struct bkey out;
+
+ EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
+ EBUG_ON(in->u64s < format->key_u64s);
+ EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
+ EBUG_ON(in->u64s - format->key_u64s + BKEY_U64s > U8_MAX);
+
+ out.u64s = BKEY_U64s + in->u64s - format->key_u64s;
+ out.format = KEY_FORMAT_CURRENT;
+ out.needs_whiteout = in->needs_whiteout;
+ out.type = in->type;
+ out.pad[0] = 0;
+
+#define x(id, field) out.field = get_inc_field(&state, id);
+ bkey_fields()
+#undef x
+
+ return out;
+}
+
+#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
+struct bpos __bkey_unpack_pos(const struct bkey_format *format,
+ const struct bkey_packed *in)
+{
+ struct unpack_state state = unpack_state_init(format, in);
+ struct bpos out;
+
+ EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
+ EBUG_ON(in->u64s < format->key_u64s);
+ EBUG_ON(in->format != KEY_FORMAT_LOCAL_BTREE);
+
+ out.inode = get_inc_field(&state, BKEY_FIELD_INODE);
+ out.offset = get_inc_field(&state, BKEY_FIELD_OFFSET);
+ out.snapshot = get_inc_field(&state, BKEY_FIELD_SNAPSHOT);
+
+ return out;
+}
+#endif
+
+/**
+ * bch2_bkey_pack_key -- pack just the key, not the value
+ * @out: packed result
+ * @in: key to pack
+ * @format: format of packed result
+ *
+ * Returns: true on success, false on failure
+ */
+bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
+ const struct bkey_format *format)
+{
+ struct pack_state state = pack_state_init(format, out);
+ u64 *w = out->_data;
+
+ EBUG_ON((void *) in == (void *) out);
+ EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
+ EBUG_ON(in->format != KEY_FORMAT_CURRENT);
+
+ *w = 0;
+
+#define x(id, field) if (!set_inc_field(&state, id, in->field)) return false;
+ bkey_fields()
+#undef x
+ pack_state_finish(&state, out);
+ out->u64s = format->key_u64s + in->u64s - BKEY_U64s;
+ out->format = KEY_FORMAT_LOCAL_BTREE;
+ out->needs_whiteout = in->needs_whiteout;
+ out->type = in->type;
+
+ bch2_bkey_pack_verify(out, in, format);
+ return true;
+}
+
+/**
+ * bch2_bkey_unpack -- unpack the key and the value
+ * @b: btree node of @src key (for packed format)
+ * @dst: unpacked result
+ * @src: packed input
+ */
+void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
+ const struct bkey_packed *src)
+{
+ __bkey_unpack_key(b, &dst->k, src);
+
+ memcpy_u64s(&dst->v,
+ bkeyp_val(&b->format, src),
+ bkeyp_val_u64s(&b->format, src));
+}
+
+/**
+ * bch2_bkey_pack -- pack the key and the value
+ * @dst: packed result
+ * @src: unpacked input
+ * @format: format of packed result
+ *
+ * Returns: true on success, false on failure
+ */
+bool bch2_bkey_pack(struct bkey_packed *dst, const struct bkey_i *src,
+ const struct bkey_format *format)
+{
+ struct bkey_packed tmp;
+
+ if (!bch2_bkey_pack_key(&tmp, &src->k, format))
+ return false;
+
+ memmove_u64s((u64 *) dst + format->key_u64s,
+ &src->v,
+ bkey_val_u64s(&src->k));
+ memcpy_u64s_small(dst, &tmp, format->key_u64s);
+
+ return true;
+}
+
+__always_inline
+static bool set_inc_field_lossy(struct pack_state *state, unsigned field, u64 v)
+{
+ unsigned bits = state->format->bits_per_field[field];
+ u64 offset = le64_to_cpu(state->format->field_offset[field]);
+ bool ret = true;
+
+ EBUG_ON(v < offset);
+ v -= offset;
+
+ if (fls64(v) > bits) {
+ v = ~(~0ULL << bits);
+ ret = false;
+ }
+
+ __set_inc_field(state, field, v);
+ return ret;
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+static bool bkey_packed_successor(struct bkey_packed *out,
+ const struct btree *b,
+ struct bkey_packed k)
+{
+ const struct bkey_format *f = &b->format;
+ unsigned nr_key_bits = b->nr_key_bits;
+ unsigned first_bit, offset;
+ u64 *p;
+
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
+
+ if (!nr_key_bits)
+ return false;
+
+ *out = k;
+
+ first_bit = high_bit_offset + nr_key_bits - 1;
+ p = nth_word(high_word(f, out), first_bit >> 6);
+ offset = 63 - (first_bit & 63);
+
+ while (nr_key_bits) {
+ unsigned bits = min(64 - offset, nr_key_bits);
+ u64 mask = (~0ULL >> (64 - bits)) << offset;
+
+ if ((*p & mask) != mask) {
+ *p += 1ULL << offset;
+ EBUG_ON(bch2_bkey_cmp_packed(b, out, &k) <= 0);
+ return true;
+ }
+
+ *p &= ~mask;
+ p = prev_word(p);
+ nr_key_bits -= bits;
+ offset = 0;
+ }
+
+ return false;
+}
+
+static bool bkey_format_has_too_big_fields(const struct bkey_format *f)
+{
+ for (unsigned i = 0; i < f->nr_fields; i++) {
+ unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+ u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
+ u64 packed_max = f->bits_per_field[i]
+ ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
+ : 0;
+ u64 field_offset = le64_to_cpu(f->field_offset[i]);
+
+ if (packed_max + field_offset < packed_max ||
+ packed_max + field_offset > unpacked_max)
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+/*
+ * Returns a packed key that compares <= in
+ *
+ * This is used in bset_search_tree(), where we need a packed pos in order to be
+ * able to compare against the keys in the auxiliary search tree - and it's
+ * legal to use a packed pos that isn't equivalent to the original pos,
+ * _provided_ it compares <= to the original pos.
+ */
+enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
+ struct bpos in,
+ const struct btree *b)
+{
+ const struct bkey_format *f = &b->format;
+ struct pack_state state = pack_state_init(f, out);
+ u64 *w = out->_data;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct bpos orig = in;
+#endif
+ bool exact = true;
+ unsigned i;
+
+ /*
+ * bch2_bkey_pack_key() will write to all of f->key_u64s, minus the 3
+ * byte header, but pack_pos() won't if the len/version fields are big
+ * enough - we need to make sure to zero them out:
+ */
+ for (i = 0; i < f->key_u64s; i++)
+ w[i] = 0;
+
+ if (unlikely(in.snapshot <
+ le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
+ if (!in.offset-- &&
+ !in.inode--)
+ return BKEY_PACK_POS_FAIL;
+ in.snapshot = KEY_SNAPSHOT_MAX;
+ exact = false;
+ }
+
+ if (unlikely(in.offset <
+ le64_to_cpu(f->field_offset[BKEY_FIELD_OFFSET]))) {
+ if (!in.inode--)
+ return BKEY_PACK_POS_FAIL;
+ in.offset = KEY_OFFSET_MAX;
+ in.snapshot = KEY_SNAPSHOT_MAX;
+ exact = false;
+ }
+
+ if (unlikely(in.inode <
+ le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
+ return BKEY_PACK_POS_FAIL;
+
+ if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode))) {
+ in.offset = KEY_OFFSET_MAX;
+ in.snapshot = KEY_SNAPSHOT_MAX;
+ exact = false;
+ }
+
+ if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset))) {
+ in.snapshot = KEY_SNAPSHOT_MAX;
+ exact = false;
+ }
+
+ if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot)))
+ exact = false;
+
+ pack_state_finish(&state, out);
+ out->u64s = f->key_u64s;
+ out->format = KEY_FORMAT_LOCAL_BTREE;
+ out->type = KEY_TYPE_deleted;
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ if (exact) {
+ BUG_ON(bkey_cmp_left_packed(b, out, &orig));
+ } else {
+ struct bkey_packed successor;
+
+ BUG_ON(bkey_cmp_left_packed(b, out, &orig) >= 0);
+ BUG_ON(bkey_packed_successor(&successor, b, *out) &&
+ bkey_cmp_left_packed(b, &successor, &orig) < 0 &&
+ !bkey_format_has_too_big_fields(f));
+ }
+#endif
+
+ return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
+}
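+
+/*
+ * Worked example (illustrative): if f->field_offset[BKEY_FIELD_SNAPSHOT] is
+ * 10 and in.snapshot is 3, the snapshot field can't be represented; we step
+ * back to the previous offset and saturate the snapshot to KEY_SNAPSHOT_MAX,
+ * returning BKEY_PACK_POS_SMALLER - a key that compares strictly less than
+ * the original, which is all the search tree comparisons need.
+ */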
+
+void bch2_bkey_format_init(struct bkey_format_state *s)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(s->field_min); i++)
+ s->field_min[i] = U64_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->field_max); i++)
+ s->field_max[i] = 0;
+
+ /* Make sure we can store a size of 0: */
+ s->field_min[BKEY_FIELD_SIZE] = 0;
+}
+
+void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
+{
+ unsigned field = 0;
+
+ __bkey_format_add(s, field++, p.inode);
+ __bkey_format_add(s, field++, p.offset);
+ __bkey_format_add(s, field++, p.snapshot);
+}
+
+/*
+ * We don't want it to be possible for the packed format to represent fields
+ * bigger than a u64... that will cause confusion and issues (like with
+ * bkey_packed_successor())
+ */
+static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
+ unsigned bits, u64 offset)
+{
+ unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+ u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
+
+ bits = min(bits, unpacked_bits);
+
+ offset = bits == unpacked_bits ? 0 : min(offset, unpacked_max - ((1ULL << bits) - 1));
+
+ f->bits_per_field[i] = bits;
+ f->field_offset[i] = cpu_to_le64(offset);
+}
+
+struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
+{
+ unsigned i, bits = KEY_PACKED_BITS_START;
+ struct bkey_format ret = {
+ .nr_fields = BKEY_NR_FIELDS,
+ };
+
+ for (i = 0; i < ARRAY_SIZE(s->field_min); i++) {
+ s->field_min[i] = min(s->field_min[i], s->field_max[i]);
+
+ set_format_field(&ret, i,
+ fls64(s->field_max[i] - s->field_min[i]),
+ s->field_min[i]);
+
+ bits += ret.bits_per_field[i];
+ }
+
+ /* allow for extent merging: */
+ if (ret.bits_per_field[BKEY_FIELD_SIZE]) {
+ unsigned b = min(4U, 32U - ret.bits_per_field[BKEY_FIELD_SIZE]);
+
+ ret.bits_per_field[BKEY_FIELD_SIZE] += b;
+ bits += b;
+ }
+
+ ret.key_u64s = DIV_ROUND_UP(bits, 64);
+
+ /* if we have enough spare bits, round fields up to nearest byte */
+ bits = ret.key_u64s * 64 - bits;
+
+ for (i = 0; i < ARRAY_SIZE(ret.bits_per_field); i++) {
+ unsigned r = round_up(ret.bits_per_field[i], 8) -
+ ret.bits_per_field[i];
+
+ if (r <= bits) {
+ set_format_field(&ret, i,
+ ret.bits_per_field[i] + r,
+ le64_to_cpu(ret.field_offset[i]));
+ bits -= r;
+ }
+ }
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ {
+ struct printbuf buf = PRINTBUF;
+
+ BUG_ON(bch2_bkey_format_invalid(NULL, &ret, 0, &buf));
+ printbuf_exit(&buf);
+ }
+#endif
+ return ret;
+}
+
+int bch2_bkey_format_invalid(struct bch_fs *c,
+ struct bkey_format *f,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ unsigned i, bits = KEY_PACKED_BITS_START;
+
+ if (f->nr_fields != BKEY_NR_FIELDS) {
+ prt_printf(err, "incorrect number of fields: got %u, should be %u",
+ f->nr_fields, BKEY_NR_FIELDS);
+ return -BCH_ERR_invalid;
+ }
+
+ /*
+ * Verify that the packed format can't represent fields larger than the
+ * unpacked format:
+ */
+ for (i = 0; i < f->nr_fields; i++) {
+ if (!c || c->sb.version_min >= bcachefs_metadata_version_snapshot) {
+ unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+ u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
+ u64 packed_max = f->bits_per_field[i]
+ ? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
+ : 0;
+ u64 field_offset = le64_to_cpu(f->field_offset[i]);
+
+ if (packed_max + field_offset < packed_max ||
+ packed_max + field_offset > unpacked_max) {
+ prt_printf(err, "field %u too large: %llu + %llu > %llu",
+ i, packed_max, field_offset, unpacked_max);
+ return -BCH_ERR_invalid;
+ }
+ }
+
+ bits += f->bits_per_field[i];
+ }
+
+ if (f->key_u64s != DIV_ROUND_UP(bits, 64)) {
+ prt_printf(err, "incorrect key_u64s: got %u, should be %u",
+ f->key_u64s, DIV_ROUND_UP(bits, 64));
+ return -BCH_ERR_invalid;
+ }
+
+ return 0;
+}
+
+void bch2_bkey_format_to_text(struct printbuf *out, const struct bkey_format *f)
+{
+ prt_printf(out, "u64s %u fields ", f->key_u64s);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(f->bits_per_field); i++) {
+ if (i)
+ prt_str(out, ", ");
+ prt_printf(out, "%u:%llu",
+ f->bits_per_field[i],
+ le64_to_cpu(f->field_offset[i]));
+ }
+}
+
+/*
+ * Most significant differing bit
+ * Bits are indexed from 0 - return is [0, nr_key_bits)
+ */
+__pure
+unsigned bch2_bkey_greatest_differing_bit(const struct btree *b,
+ const struct bkey_packed *l_k,
+ const struct bkey_packed *r_k)
+{
+ const u64 *l = high_word(&b->format, l_k);
+ const u64 *r = high_word(&b->format, r_k);
+ unsigned nr_key_bits = b->nr_key_bits;
+ unsigned word_bits = 64 - high_bit_offset;
+ u64 l_v, r_v;
+
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
+
+ /* for big endian, skip past header */
+ l_v = *l & (~0ULL >> high_bit_offset);
+ r_v = *r & (~0ULL >> high_bit_offset);
+
+ while (nr_key_bits) {
+ if (nr_key_bits < word_bits) {
+ l_v >>= word_bits - nr_key_bits;
+ r_v >>= word_bits - nr_key_bits;
+ nr_key_bits = 0;
+ } else {
+ nr_key_bits -= word_bits;
+ }
+
+ if (l_v != r_v)
+ return fls64(l_v ^ r_v) - 1 + nr_key_bits;
+
+ l = next_word(l);
+ r = next_word(r);
+
+ l_v = *l;
+ r_v = *r;
+ word_bits = 64;
+ }
+
+ return 0;
+}
+
+/*
+ * First set bit
+ * Bits are indexed from 0 - return is [0, nr_key_bits)
+ */
+__pure
+unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
+{
+ const u64 *p = high_word(&b->format, k);
+ unsigned nr_key_bits = b->nr_key_bits;
+ unsigned ret = 0, offset;
+
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(&b->format));
+
+ offset = nr_key_bits;
+ while (offset > 64) {
+ p = next_word(p);
+ offset -= 64;
+ }
+
+ offset = 64 - offset;
+
+ while (nr_key_bits) {
+ unsigned bits = nr_key_bits + offset < 64
+ ? nr_key_bits
+ : 64 - offset;
+
+ u64 mask = (~0ULL >> (64 - bits)) << offset;
+
+ if (*p & mask)
+ return ret + __ffs64(*p & mask) - offset;
+
+ p = prev_word(p);
+ nr_key_bits -= bits;
+ ret += bits;
+ offset = 0;
+ }
+
+ return 0;
+}
+
+#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
+
+#define I(_x) (*(out)++ = (_x))
+#define I1(i0) I(i0)
+#define I2(i0, i1) (I1(i0), I(i1))
+#define I3(i0, i1, i2) (I2(i0, i1), I(i2))
+#define I4(i0, i1, i2, i3) (I3(i0, i1, i2), I(i3))
+#define I5(i0, i1, i2, i3, i4) (I4(i0, i1, i2, i3), I(i4))
+
+static u8 *compile_bkey_field(const struct bkey_format *format, u8 *out,
+ enum bch_bkey_fields field,
+ unsigned dst_offset, unsigned dst_size,
+ bool *eax_zeroed)
+{
+ unsigned bits = format->bits_per_field[field];
+ u64 offset = le64_to_cpu(format->field_offset[field]);
+ unsigned i, byte, bit_offset, align, shl, shr;
+
+ if (!bits && !offset) {
+ if (!*eax_zeroed) {
+ /* xor eax, eax */
+ I2(0x31, 0xc0);
+ }
+
+ *eax_zeroed = true;
+ goto set_field;
+ }
+
+ if (!bits) {
+ /* just return offset: */
+
+ switch (dst_size) {
+ case 8:
+ if (offset > S32_MAX) {
+ /* mov [rdi + dst_offset], offset */
+ I3(0xc7, 0x47, dst_offset);
+ memcpy(out, &offset, 4);
+ out += 4;
+
+ I3(0xc7, 0x47, dst_offset + 4);
+ memcpy(out, (void *) &offset + 4, 4);
+ out += 4;
+ } else {
+ /* mov [rdi + dst_offset], offset */
+ /* sign extended */
+ I4(0x48, 0xc7, 0x47, dst_offset);
+ memcpy(out, &offset, 4);
+ out += 4;
+ }
+ break;
+ case 4:
+ /* mov [rdi + dst_offset], offset */
+ I3(0xc7, 0x47, dst_offset);
+ memcpy(out, &offset, 4);
+ out += 4;
+ break;
+ default:
+ BUG();
+ }
+
+ return out;
+ }
+
+ bit_offset = format->key_u64s * 64;
+ for (i = 0; i <= field; i++)
+ bit_offset -= format->bits_per_field[i];
+
+ byte = bit_offset / 8;
+ bit_offset -= byte * 8;
+
+ *eax_zeroed = false;
+
+ if (bit_offset == 0 && bits == 8) {
+ /* movzx eax, BYTE PTR [rsi + imm8] */
+ I4(0x0f, 0xb6, 0x46, byte);
+ } else if (bit_offset == 0 && bits == 16) {
+ /* movzx eax, WORD PTR [rsi + imm8] */
+ I4(0x0f, 0xb7, 0x46, byte);
+ } else if (bit_offset + bits <= 32) {
+ align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
+ byte -= align;
+ bit_offset += align * 8;
+
+ BUG_ON(bit_offset + bits > 32);
+
+ /* mov eax, [rsi + imm8] */
+ I3(0x8b, 0x46, byte);
+
+ if (bit_offset) {
+ /* shr eax, imm8 */
+ I3(0xc1, 0xe8, bit_offset);
+ }
+
+ if (bit_offset + bits < 32) {
+ unsigned mask = ~0U >> (32 - bits);
+
+ /* and eax, imm32 */
+ I1(0x25);
+ memcpy(out, &mask, 4);
+ out += 4;
+ }
+ } else if (bit_offset + bits <= 64) {
+ align = min(8 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 7);
+ byte -= align;
+ bit_offset += align * 8;
+
+ BUG_ON(bit_offset + bits > 64);
+
+ /* mov rax, [rsi + imm8] */
+ I4(0x48, 0x8b, 0x46, byte);
+
+ shl = 64 - bit_offset - bits;
+ shr = bit_offset + shl;
+
+ if (shl) {
+ /* shl rax, imm8 */
+ I4(0x48, 0xc1, 0xe0, shl);
+ }
+
+ if (shr) {
+ /* shr rax, imm8 */
+ I4(0x48, 0xc1, 0xe8, shr);
+ }
+ } else {
+ align = min(4 - DIV_ROUND_UP(bit_offset + bits, 8), byte & 3);
+ byte -= align;
+ bit_offset += align * 8;
+
+ BUG_ON(bit_offset + bits > 96);
+
+ /* mov rax, [rsi + byte] */
+ I4(0x48, 0x8b, 0x46, byte);
+
+ /* mov edx, [rsi + byte + 8] */
+ I3(0x8b, 0x56, byte + 8);
+
+ /* bits from next word: */
+ shr = bit_offset + bits - 64;
+ BUG_ON(shr > bit_offset);
+
+ /* shr rax, bit_offset */
+ I4(0x48, 0xc1, 0xe8, shr);
+
+ /* shl rdx, imm8 */
+ I4(0x48, 0xc1, 0xe2, 64 - shr);
+
+ /* or rax, rdx */
+ I3(0x48, 0x09, 0xd0);
+
+ shr = bit_offset - shr;
+
+ if (shr) {
+ /* shr rax, imm8 */
+ I4(0x48, 0xc1, 0xe8, shr);
+ }
+ }
+
+ /* rax += offset: */
+ if (offset > S32_MAX) {
+ /* mov rdx, imm64 */
+ I2(0x48, 0xba);
+ memcpy(out, &offset, 8);
+ out += 8;
+ /* add %rdx, %rax */
+ I3(0x48, 0x01, 0xd0);
+ } else if (offset + (~0ULL >> (64 - bits)) > U32_MAX) {
+ /* add rax, imm32 */
+ I2(0x48, 0x05);
+ memcpy(out, &offset, 4);
+ out += 4;
+ } else if (offset) {
+ /* add eax, imm32 */
+ I1(0x05);
+ memcpy(out, &offset, 4);
+ out += 4;
+ }
+set_field:
+ switch (dst_size) {
+ case 8:
+ /* mov [rdi + dst_offset], rax */
+ I4(0x48, 0x89, 0x47, dst_offset);
+ break;
+ case 4:
+ /* mov [rdi + dst_offset], eax */
+ I3(0x89, 0x47, dst_offset);
+ break;
+ default:
+ BUG();
+ }
+
+ return out;
+}
+
+int bch2_compile_bkey_format(const struct bkey_format *format, void *_out)
+{
+ bool eax_zeroed = false;
+ u8 *out = _out;
+
+ /*
+ * rdi: dst - unpacked key
+ * rsi: src - packed key
+ */
+
+ /* k->u64s, k->format, k->type */
+
+ /* mov eax, [rsi] */
+ I2(0x8b, 0x06);
+
+ /* add eax, BKEY_U64s - format->key_u64s */
+ I5(0x05, BKEY_U64s - format->key_u64s, KEY_FORMAT_CURRENT, 0, 0);
+
+ /* and eax, imm32: mask out k->pad: */
+ I5(0x25, 0xff, 0xff, 0xff, 0);
+
+ /* mov [rdi], eax */
+ I2(0x89, 0x07);
+
+#define x(id, field) \
+ out = compile_bkey_field(format, out, id, \
+ offsetof(struct bkey, field), \
+ sizeof(((struct bkey *) NULL)->field), \
+ &eax_zeroed);
+ bkey_fields()
+#undef x
+
+ /* retq */
+ I1(0xc3);
+
+ return (void *) out - _out;
+}
+
+#endif
+
+__pure
+int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
+ const struct bkey_packed *r,
+ const struct btree *b)
+{
+ return __bch2_bkey_cmp_packed_format_checked_inlined(l, r, b);
+}
+
+__pure __flatten
+int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bpos *r)
+{
+ return bpos_cmp(bkey_unpack_pos_format_checked(b, l), *r);
+}
+
+__pure __flatten
+int bch2_bkey_cmp_packed(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r)
+{
+ return bch2_bkey_cmp_packed_inlined(b, l, r);
+}
+
+__pure __flatten
+int __bch2_bkey_cmp_left_packed(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bpos *r)
+{
+ const struct bkey *l_unpacked;
+
+ return unlikely(l_unpacked = packed_to_bkey_c(l))
+ ? bpos_cmp(l_unpacked->p, *r)
+ : __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
+}
+
+void bch2_bpos_swab(struct bpos *p)
+{
+ u8 *l = (u8 *) p;
+ u8 *h = ((u8 *) &p[1]) - 1;
+
+ while (l < h) {
+ swap(*l, *h);
+ l++;
+ --h;
+ }
+}
+
+void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
+{
+ const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
+ u8 *l = k->key_start;
+ u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
+
+ while (l < h) {
+ swap(*l, *h);
+ l++;
+ --h;
+ }
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_bkey_pack_test(void)
+{
+ struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
+ struct bkey_packed p;
+
+ struct bkey_format test_format = {
+ .key_u64s = 3,
+ .nr_fields = BKEY_NR_FIELDS,
+ .bits_per_field = {
+ 13,
+ 64,
+ 32,
+ },
+ };
+
+ struct unpack_state in_s =
+ unpack_state_init(&bch2_bkey_format_current, (void *) &t);
+ struct pack_state out_s = pack_state_init(&test_format, &p);
+ unsigned i;
+
+ for (i = 0; i < out_s.format->nr_fields; i++) {
+ u64 a, v = get_inc_field(&in_s, i);
+
+ switch (i) {
+#define x(id, field) case id: a = t.field; break;
+ bkey_fields()
+#undef x
+ default:
+ BUG();
+ }
+
+ if (a != v)
+ panic("got %llu actual %llu i %u\n", v, a, i);
+
+ if (!set_inc_field(&out_s, i, v))
+ panic("failed at %u\n", i);
+ }
+
+ BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format));
+}
+#endif
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
new file mode 100644
index 000000000000..518450209236
--- /dev/null
+++ b/fs/bcachefs/bkey.h
@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BKEY_H
+#define _BCACHEFS_BKEY_H
+
+#include <linux/bug.h>
+#include "bcachefs_format.h"
+
+#include "btree_types.h"
+#include "util.h"
+#include "vstructs.h"
+
+enum bkey_invalid_flags {
+ BKEY_INVALID_WRITE = (1U << 0),
+ BKEY_INVALID_COMMIT = (1U << 1),
+ BKEY_INVALID_JOURNAL = (1U << 2),
+};
+
+#if 0
+
+/*
+ * compiled unpack functions are disabled, pending a new interface for
+ * dynamically allocating executable memory:
+ */
+
+#ifdef CONFIG_X86_64
+#define HAVE_BCACHEFS_COMPILED_UNPACK 1
+#endif
+#endif
+
+void bch2_bkey_packed_to_binary_text(struct printbuf *,
+ const struct bkey_format *,
+ const struct bkey_packed *);
+
+/* bkey with split value, const */
+struct bkey_s_c {
+ const struct bkey *k;
+ const struct bch_val *v;
+};
+
+/* bkey with split value */
+struct bkey_s {
+ union {
+ struct {
+ struct bkey *k;
+ struct bch_val *v;
+ };
+ struct bkey_s_c s_c;
+ };
+};
+
+#define bkey_p_next(_k) vstruct_next(_k)
+
+static inline struct bkey_i *bkey_next(struct bkey_i *k)
+{
+ return (struct bkey_i *) ((u64 *) k->_data + k->k.u64s);
+}
+
+#define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s)
+
+static inline size_t bkey_val_bytes(const struct bkey *k)
+{
+ return bkey_val_u64s(k) * sizeof(u64);
+}
+
+static inline void set_bkey_val_u64s(struct bkey *k, unsigned val_u64s)
+{
+ unsigned u64s = BKEY_U64s + val_u64s;
+
+ BUG_ON(u64s > U8_MAX);
+ k->u64s = u64s;
+}
+
+static inline void set_bkey_val_bytes(struct bkey *k, unsigned bytes)
+{
+ set_bkey_val_u64s(k, DIV_ROUND_UP(bytes, sizeof(u64)));
+}
+
+#define bkey_val_end(_k) ((void *) (((u64 *) (_k).v) + bkey_val_u64s((_k).k)))
+
+#define bkey_deleted(_k) ((_k)->type == KEY_TYPE_deleted)
+
+#define bkey_whiteout(_k) \
+ ((_k)->type == KEY_TYPE_deleted || (_k)->type == KEY_TYPE_whiteout)
+
+enum bkey_lr_packed {
+ BKEY_PACKED_BOTH,
+ BKEY_PACKED_RIGHT,
+ BKEY_PACKED_LEFT,
+ BKEY_PACKED_NONE,
+};
+
+#define bkey_lr_packed(_l, _r) \
+ ((_l)->format + ((_r)->format << 1))
+
+#define bkey_copy(_dst, _src) \
+do { \
+ BUILD_BUG_ON(!type_is(_dst, struct bkey_i *) && \
+ !type_is(_dst, struct bkey_packed *)); \
+ BUILD_BUG_ON(!type_is(_src, struct bkey_i *) && \
+ !type_is(_src, struct bkey_packed *)); \
+ EBUG_ON((u64 *) (_dst) > (u64 *) (_src) && \
+ (u64 *) (_dst) < (u64 *) (_src) + \
+ ((struct bkey *) (_src))->u64s); \
+ \
+ memcpy_u64s_small((_dst), (_src), \
+ ((struct bkey *) (_src))->u64s); \
+} while (0)
+
+struct btree;
+
+__pure
+unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
+ const struct bkey_packed *,
+ const struct bkey_packed *);
+__pure
+unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);
+
+__pure
+int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
+ const struct bkey_packed *,
+ const struct btree *);
+
+__pure
+int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
+ const struct bkey_packed *,
+ const struct bpos *);
+
+__pure
+int bch2_bkey_cmp_packed(const struct btree *,
+ const struct bkey_packed *,
+ const struct bkey_packed *);
+
+__pure
+int __bch2_bkey_cmp_left_packed(const struct btree *,
+ const struct bkey_packed *,
+ const struct bpos *);
+
+static inline __pure
+int bkey_cmp_left_packed(const struct btree *b,
+ const struct bkey_packed *l, const struct bpos *r)
+{
+ return __bch2_bkey_cmp_left_packed(b, l, r);
+}
+
+/*
+ * The compiler generates better code when we pass bpos by reference, but it's
+ * often much more convenient to pass it by value; as much as I hate C++, a
+ * const reference would be nice here:
+ */
+__pure __flatten
+static inline int bkey_cmp_left_packed_byval(const struct btree *b,
+ const struct bkey_packed *l,
+ struct bpos r)
+{
+ return bkey_cmp_left_packed(b, l, &r);
+}
+
+static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
+{
+ return !((l.inode ^ r.inode) |
+ (l.offset ^ r.offset) |
+ (l.snapshot ^ r.snapshot));
+}
+
+static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode ? l.inode < r.inode :
+ l.offset != r.offset ? l.offset < r.offset :
+ l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
+}
+
+static __always_inline bool bpos_le(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode ? l.inode < r.inode :
+ l.offset != r.offset ? l.offset < r.offset :
+ l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
+}
+
+static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
+{
+ return bpos_lt(r, l);
+}
+
+static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
+{
+ return bpos_le(r, l);
+}
+
+static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
+{
+ return cmp_int(l.inode, r.inode) ?:
+ cmp_int(l.offset, r.offset) ?:
+ cmp_int(l.snapshot, r.snapshot);
+}
+
+static inline struct bpos bpos_min(struct bpos l, struct bpos r)
+{
+ return bpos_lt(l, r) ? l : r;
+}
+
+static inline struct bpos bpos_max(struct bpos l, struct bpos r)
+{
+ return bpos_gt(l, r) ? l : r;
+}
+
+static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
+{
+ return !((l.inode ^ r.inode) |
+ (l.offset ^ r.offset));
+}
+
+static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode
+ ? l.inode < r.inode
+ : l.offset < r.offset;
+}
+
+static __always_inline bool bkey_le(struct bpos l, struct bpos r)
+{
+ return l.inode != r.inode
+ ? l.inode < r.inode
+ : l.offset <= r.offset;
+}
+
+static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
+{
+ return bkey_lt(r, l);
+}
+
+static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
+{
+ return bkey_le(r, l);
+}
+
+static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
+{
+ return cmp_int(l.inode, r.inode) ?:
+ cmp_int(l.offset, r.offset);
+}
+
+static inline struct bpos bkey_min(struct bpos l, struct bpos r)
+{
+ return bkey_lt(l, r) ? l : r;
+}
+
+static inline struct bpos bkey_max(struct bpos l, struct bpos r)
+{
+ return bkey_gt(l, r) ? l : r;
+}
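+
+/*
+ * Illustrative sketch (assuming the SPOS() initializer from
+ * bcachefs_format.h): the bkey_*() comparisons above ignore the snapshot
+ * field, while the bpos_*() comparisons include it:
+ *
+ *	struct bpos a = SPOS(1, 8, 2), b = SPOS(1, 8, 3);
+ *
+ *	bpos_eq(a, b)	-> false (snapshots differ)
+ *	bkey_eq(a, b)	-> true  (inode:offset identical)
+ */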
+
+void bch2_bpos_swab(struct bpos *);
+void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
+
+static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
+{
+ return cmp_int(l.hi, r.hi) ?:
+ cmp_int(l.lo, r.lo);
+}
+
+#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
+#define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL })
+
+static __always_inline int bversion_zero(struct bversion v)
+{
+ return !bversion_cmp(v, ZERO_VERSION);
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+/* statement expressions confusing unlikely()? */
+#define bkey_packed(_k) \
+ ({ EBUG_ON((_k)->format > KEY_FORMAT_CURRENT); \
+ (_k)->format != KEY_FORMAT_CURRENT; })
+#else
+#define bkey_packed(_k) ((_k)->format != KEY_FORMAT_CURRENT)
+#endif
+
+/*
+ * It's safe to treat an unpacked bkey as a packed one, but not the reverse
+ */
+static inline struct bkey_packed *bkey_to_packed(struct bkey_i *k)
+{
+ return (struct bkey_packed *) k;
+}
+
+static inline const struct bkey_packed *bkey_to_packed_c(const struct bkey_i *k)
+{
+ return (const struct bkey_packed *) k;
+}
+
+static inline struct bkey_i *packed_to_bkey(struct bkey_packed *k)
+{
+ return bkey_packed(k) ? NULL : (struct bkey_i *) k;
+}
+
+static inline const struct bkey *packed_to_bkey_c(const struct bkey_packed *k)
+{
+ return bkey_packed(k) ? NULL : (const struct bkey *) k;
+}
+
+static inline unsigned bkey_format_key_bits(const struct bkey_format *format)
+{
+ return format->bits_per_field[BKEY_FIELD_INODE] +
+ format->bits_per_field[BKEY_FIELD_OFFSET] +
+ format->bits_per_field[BKEY_FIELD_SNAPSHOT];
+}
+
+static inline struct bpos bpos_successor(struct bpos p)
+{
+ if (!++p.snapshot &&
+ !++p.offset &&
+ !++p.inode)
+ BUG();
+
+ return p;
+}
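+
+/*
+ * Example of the carry behaviour above (a sketch, assuming SPOS()):
+ * bpos_successor() treats (inode, offset, snapshot) as one wide integer, so
+ * incrementing past a U32_MAX snapshot carries into the offset:
+ *
+ *	bpos_successor(SPOS(0, 0, U32_MAX)) == SPOS(0, 1, 0)
+ */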
+
+static inline struct bpos bpos_predecessor(struct bpos p)
+{
+ if (!p.snapshot-- &&
+ !p.offset-- &&
+ !p.inode--)
+ BUG();
+
+ return p;
+}
+
+static inline struct bpos bpos_nosnap_successor(struct bpos p)
+{
+ p.snapshot = 0;
+
+ if (!++p.offset &&
+ !++p.inode)
+ BUG();
+
+ return p;
+}
+
+static inline struct bpos bpos_nosnap_predecessor(struct bpos p)
+{
+ p.snapshot = 0;
+
+ if (!p.offset-- &&
+ !p.inode--)
+ BUG();
+
+ return p;
+}
+
+static inline u64 bkey_start_offset(const struct bkey *k)
+{
+ return k->p.offset - k->size;
+}
+
+static inline struct bpos bkey_start_pos(const struct bkey *k)
+{
+ return (struct bpos) {
+ .inode = k->p.inode,
+ .offset = bkey_start_offset(k),
+ .snapshot = k->p.snapshot,
+ };
+}
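+
+/*
+ * Worked example: a bkey's position is the *end* of the range it covers, and
+ * @size extends backwards from it - so a key with p.offset == 16 and
+ * size == 8 covers offsets [8, 16), and bkey_start_offset() returns 8.
+ */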
+
+/* Packed helpers */
+
+static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
+ const struct bkey_packed *k)
+{
+ unsigned ret = bkey_packed(k) ? format->key_u64s : BKEY_U64s;
+
+ EBUG_ON(k->u64s < ret);
+ return ret;
+}
+
+static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
+ const struct bkey_packed *k)
+{
+ return bkeyp_key_u64s(format, k) * sizeof(u64);
+}
+
+static inline unsigned bkeyp_val_u64s(const struct bkey_format *format,
+ const struct bkey_packed *k)
+{
+ return k->u64s - bkeyp_key_u64s(format, k);
+}
+
+static inline size_t bkeyp_val_bytes(const struct bkey_format *format,
+ const struct bkey_packed *k)
+{
+ return bkeyp_val_u64s(format, k) * sizeof(u64);
+}
+
+static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
+ struct bkey_packed *k, unsigned val_u64s)
+{
+ k->u64s = bkeyp_key_u64s(format, k) + val_u64s;
+}
+
+#define bkeyp_val(_format, _k) \
+ ((struct bch_val *) ((u64 *) (_k)->_data + bkeyp_key_u64s(_format, _k)))
+
+extern const struct bkey_format bch2_bkey_format_current;
+
+bool bch2_bkey_transform(const struct bkey_format *,
+ struct bkey_packed *,
+ const struct bkey_format *,
+ const struct bkey_packed *);
+
+struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
+ const struct bkey_packed *);
+
+#ifndef HAVE_BCACHEFS_COMPILED_UNPACK
+struct bpos __bkey_unpack_pos(const struct bkey_format *,
+ const struct bkey_packed *);
+#endif
+
+bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
+ const struct bkey_format *);
+
+enum bkey_pack_pos_ret {
+ BKEY_PACK_POS_EXACT,
+ BKEY_PACK_POS_SMALLER,
+ BKEY_PACK_POS_FAIL,
+};
+
+enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
+ const struct btree *);
+
+static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
+ const struct btree *b)
+{
+ return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
+}
+
+void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
+ const struct bkey_packed *);
+bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
+ const struct bkey_format *);
+
+typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);
+
+static inline void
+__bkey_unpack_key_format_checked(const struct btree *b,
+ struct bkey *dst,
+ const struct bkey_packed *src)
+{
+ if (IS_ENABLED(HAVE_BCACHEFS_COMPILED_UNPACK)) {
+ compiled_unpack_fn unpack_fn = b->aux_data;
+ unpack_fn(dst, src);
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+ bch2_expensive_debug_checks) {
+ struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);
+
+ BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
+ }
+ } else {
+ *dst = __bch2_bkey_unpack_key(&b->format, src);
+ }
+}
+
+static inline struct bkey
+bkey_unpack_key_format_checked(const struct btree *b,
+ const struct bkey_packed *src)
+{
+ struct bkey dst;
+
+ __bkey_unpack_key_format_checked(b, &dst, src);
+ return dst;
+}
+
+static inline void __bkey_unpack_key(const struct btree *b,
+ struct bkey *dst,
+ const struct bkey_packed *src)
+{
+ if (likely(bkey_packed(src)))
+ __bkey_unpack_key_format_checked(b, dst, src);
+ else
+ *dst = *packed_to_bkey_c(src);
+}
+
+/**
+ * bkey_unpack_key - unpack just the key, not the value
+ */
+static inline struct bkey bkey_unpack_key(const struct btree *b,
+ const struct bkey_packed *src)
+{
+ return likely(bkey_packed(src))
+ ? bkey_unpack_key_format_checked(b, src)
+ : *packed_to_bkey_c(src);
+}
+
+static inline struct bpos
+bkey_unpack_pos_format_checked(const struct btree *b,
+ const struct bkey_packed *src)
+{
+#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
+ return bkey_unpack_key_format_checked(b, src).p;
+#else
+ return __bkey_unpack_pos(&b->format, src);
+#endif
+}
+
+static inline struct bpos bkey_unpack_pos(const struct btree *b,
+ const struct bkey_packed *src)
+{
+ return likely(bkey_packed(src))
+ ? bkey_unpack_pos_format_checked(b, src)
+ : packed_to_bkey_c(src)->p;
+}
+
+/* Disassembled bkeys */
+
+static inline struct bkey_s_c bkey_disassemble(const struct btree *b,
+ const struct bkey_packed *k,
+ struct bkey *u)
+{
+ __bkey_unpack_key(b, u, k);
+
+ return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
+}
+
+/* non const version: */
+static inline struct bkey_s __bkey_disassemble(const struct btree *b,
+ struct bkey_packed *k,
+ struct bkey *u)
+{
+ __bkey_unpack_key(b, u, k);
+
+ return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
+}
+
+static inline u64 bkey_field_max(const struct bkey_format *f,
+ enum bch_bkey_fields nr)
+{
+ return f->bits_per_field[nr] < 64
+ ? (le64_to_cpu(f->field_offset[nr]) +
+ ~(~0ULL << f->bits_per_field[nr]))
+ : U64_MAX;
+}
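+
+/*
+ * e.g. (sketch): with le64_to_cpu(f->field_offset[nr]) == 100 and
+ * f->bits_per_field[nr] == 4, the largest representable packed value is
+ * 100 + 0b1111 == 115, which is what bkey_field_max() returns.
+ */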
+
+#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
+
+int bch2_compile_bkey_format(const struct bkey_format *, void *);
+
+#else
+
+static inline int bch2_compile_bkey_format(const struct bkey_format *format,
+ void *out) { return 0; }
+
+#endif
+
+static inline void bkey_reassemble(struct bkey_i *dst,
+ struct bkey_s_c src)
+{
+ dst->k = *src.k;
+ memcpy_u64s_small(&dst->v, src.v, bkey_val_u64s(src.k));
+}
+
+#define bkey_s_null ((struct bkey_s) { .k = NULL })
+#define bkey_s_c_null ((struct bkey_s_c) { .k = NULL })
+
+#define bkey_s_err(err) ((struct bkey_s) { .k = ERR_PTR(err) })
+#define bkey_s_c_err(err) ((struct bkey_s_c) { .k = ERR_PTR(err) })
+
+static inline struct bkey_s bkey_to_s(struct bkey *k)
+{
+ return (struct bkey_s) { .k = k, .v = NULL };
+}
+
+static inline struct bkey_s_c bkey_to_s_c(const struct bkey *k)
+{
+ return (struct bkey_s_c) { .k = k, .v = NULL };
+}
+
+static inline struct bkey_s bkey_i_to_s(struct bkey_i *k)
+{
+ return (struct bkey_s) { .k = &k->k, .v = &k->v };
+}
+
+static inline struct bkey_s_c bkey_i_to_s_c(const struct bkey_i *k)
+{
+ return (struct bkey_s_c) { .k = &k->k, .v = &k->v };
+}
+
+/*
+ * For a given type of value (e.g. struct bch_extent), generates the types for
+ * bkey + bch_extent - inline, split, split const - and also all the conversion
+ * functions, which also check that the value is of the correct type.
+ *
+ * We use anonymous unions for upcasting - e.g. converting from e.g. a
+ * bkey_i_extent to a bkey_i - since that's always safe, instead of conversion
+ * functions.
+ */
+#define x(name, ...) \
+struct bkey_i_##name { \
+ union { \
+ struct bkey k; \
+ struct bkey_i k_i; \
+ }; \
+ struct bch_##name v; \
+}; \
+ \
+struct bkey_s_c_##name { \
+ union { \
+ struct { \
+ const struct bkey *k; \
+ const struct bch_##name *v; \
+ }; \
+ struct bkey_s_c s_c; \
+ }; \
+}; \
+ \
+struct bkey_s_##name { \
+ union { \
+ struct { \
+ struct bkey *k; \
+ struct bch_##name *v; \
+ }; \
+ struct bkey_s_c_##name c; \
+ struct bkey_s s; \
+ struct bkey_s_c s_c; \
+ }; \
+}; \
+ \
+static inline struct bkey_i_##name *bkey_i_to_##name(struct bkey_i *k) \
+{ \
+ EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
+ return container_of(&k->k, struct bkey_i_##name, k); \
+} \
+ \
+static inline const struct bkey_i_##name * \
+bkey_i_to_##name##_c(const struct bkey_i *k) \
+{ \
+ EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
+ return container_of(&k->k, struct bkey_i_##name, k); \
+} \
+ \
+static inline struct bkey_s_##name bkey_s_to_##name(struct bkey_s k) \
+{ \
+ EBUG_ON(!IS_ERR_OR_NULL(k.k) && k.k->type != KEY_TYPE_##name); \
+ return (struct bkey_s_##name) { \
+ .k = k.k, \
+ .v = container_of(k.v, struct bch_##name, v), \
+ }; \
+} \
+ \
+static inline struct bkey_s_c_##name bkey_s_c_to_##name(struct bkey_s_c k)\
+{ \
+ EBUG_ON(!IS_ERR_OR_NULL(k.k) && k.k->type != KEY_TYPE_##name); \
+ return (struct bkey_s_c_##name) { \
+ .k = k.k, \
+ .v = container_of(k.v, struct bch_##name, v), \
+ }; \
+} \
+ \
+static inline struct bkey_s_##name name##_i_to_s(struct bkey_i_##name *k)\
+{ \
+ return (struct bkey_s_##name) { \
+ .k = &k->k, \
+ .v = &k->v, \
+ }; \
+} \
+ \
+static inline struct bkey_s_c_##name \
+name##_i_to_s_c(const struct bkey_i_##name *k) \
+{ \
+ return (struct bkey_s_c_##name) { \
+ .k = &k->k, \
+ .v = &k->v, \
+ }; \
+} \
+ \
+static inline struct bkey_s_##name bkey_i_to_s_##name(struct bkey_i *k) \
+{ \
+ EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
+ return (struct bkey_s_##name) { \
+ .k = &k->k, \
+ .v = container_of(&k->v, struct bch_##name, v), \
+ }; \
+} \
+ \
+static inline struct bkey_s_c_##name \
+bkey_i_to_s_c_##name(const struct bkey_i *k) \
+{ \
+ EBUG_ON(!IS_ERR_OR_NULL(k) && k->k.type != KEY_TYPE_##name); \
+ return (struct bkey_s_c_##name) { \
+ .k = &k->k, \
+ .v = container_of(&k->v, struct bch_##name, v), \
+ }; \
+} \
+ \
+static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
+{ \
+ struct bkey_i_##name *k = \
+ container_of(&_k->k, struct bkey_i_##name, k); \
+ \
+ bkey_init(&k->k); \
+ memset(&k->v, 0, sizeof(k->v)); \
+ k->k.type = KEY_TYPE_##name; \
+ set_bkey_val_bytes(&k->k, sizeof(k->v)); \
+ \
+ return k; \
+}
+
+BCH_BKEY_TYPES();
+#undef x
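+
+/*
+ * Illustration of what the x() macro above generates (a sketch, not built -
+ * assumes KEY_TYPE_cookie and the struct bch_cookie layout from
+ * bcachefs_format.h):
+ *
+ *	struct bkey_i_cookie cookie;
+ *
+ *	bkey_cookie_init(&cookie.k_i);
+ *	cookie.v.cookie = cpu_to_le64(42);
+ *
+ *	struct bkey_s_c_cookie c = cookie_i_to_s_c(&cookie);
+ *	struct bkey_i *generic = &cookie.k_i;
+ */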
+
+/* byte order helpers */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+
+static inline unsigned high_word_offset(const struct bkey_format *f)
+{
+ return f->key_u64s - 1;
+}
+
+#define high_bit_offset 0
+#define nth_word(p, n) ((p) - (n))
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+static inline unsigned high_word_offset(const struct bkey_format *f)
+{
+ return 0;
+}
+
+#define high_bit_offset KEY_PACKED_BITS_START
+#define nth_word(p, n) ((p) + (n))
+
+#else
+#error edit for your odd byteorder.
+#endif
+
+#define high_word(f, k) ((u64 *) (k)->_data + high_word_offset(f))
+#define next_word(p) nth_word(p, 1)
+#define prev_word(p) nth_word(p, -1)
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_bkey_pack_test(void);
+#else
+static inline void bch2_bkey_pack_test(void) {}
+#endif
+
+#define bkey_fields() \
+ x(BKEY_FIELD_INODE, p.inode) \
+ x(BKEY_FIELD_OFFSET, p.offset) \
+ x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
+ x(BKEY_FIELD_SIZE, size) \
+ x(BKEY_FIELD_VERSION_HI, version.hi) \
+ x(BKEY_FIELD_VERSION_LO, version.lo)
+
+struct bkey_format_state {
+ u64 field_min[BKEY_NR_FIELDS];
+ u64 field_max[BKEY_NR_FIELDS];
+};
+
+void bch2_bkey_format_init(struct bkey_format_state *);
+
+static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
+{
+ s->field_min[field] = min(s->field_min[field], v);
+ s->field_max[field] = max(s->field_max[field], v);
+}
+
+/*
+ * Changes @format so that @k can be successfully packed with @format
+ */
+static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
+{
+#define x(id, field) __bkey_format_add(s, id, k->field);
+ bkey_fields()
+#undef x
+}
+
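+/*
+ * Typical usage (sketch; k1, k2 and min_key are hypothetical): accumulate the
+ * keys going into a btree node, then produce a format that can pack all of
+ * them:
+ *
+ *	struct bkey_format_state s;
+ *
+ *	bch2_bkey_format_init(&s);
+ *	bch2_bkey_format_add_key(&s, &k1);
+ *	bch2_bkey_format_add_key(&s, &k2);
+ *	bch2_bkey_format_add_pos(&s, min_key);
+ *	f = bch2_bkey_format_done(&s);
+ */
+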
+void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
+struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
+int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);
+
+#endif /* _BCACHEFS_BKEY_H */
diff --git a/fs/bcachefs/bkey_buf.h b/fs/bcachefs/bkey_buf.h
new file mode 100644
index 000000000000..a30c4ae8eb36
--- /dev/null
+++ b/fs/bcachefs/bkey_buf.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BKEY_BUF_H
+#define _BCACHEFS_BKEY_BUF_H
+
+#include "bcachefs.h"
+#include "bkey.h"
+
+struct bkey_buf {
+ struct bkey_i *k;
+ u64 onstack[12];
+};
+
+static inline void bch2_bkey_buf_realloc(struct bkey_buf *s,
+ struct bch_fs *c, unsigned u64s)
+{
+ if (s->k == (void *) s->onstack &&
+ u64s > ARRAY_SIZE(s->onstack)) {
+ s->k = mempool_alloc(&c->large_bkey_pool, GFP_NOFS);
+ memcpy(s->k, s->onstack, sizeof(s->onstack));
+ }
+}
+
+static inline void bch2_bkey_buf_reassemble(struct bkey_buf *s,
+ struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_buf_realloc(s, c, k.k->u64s);
+ bkey_reassemble(s->k, k);
+}
+
+static inline void bch2_bkey_buf_copy(struct bkey_buf *s,
+ struct bch_fs *c,
+ struct bkey_i *src)
+{
+ bch2_bkey_buf_realloc(s, c, src->k.u64s);
+ bkey_copy(s->k, src);
+}
+
+static inline void bch2_bkey_buf_unpack(struct bkey_buf *s,
+ struct bch_fs *c,
+ struct btree *b,
+ struct bkey_packed *src)
+{
+ bch2_bkey_buf_realloc(s, c, BKEY_U64s +
+ bkeyp_val_u64s(&b->format, src));
+ bch2_bkey_unpack(b, s->k, src);
+}
+
+static inline void bch2_bkey_buf_init(struct bkey_buf *s)
+{
+ s->k = (void *) s->onstack;
+}
+
+static inline void bch2_bkey_buf_exit(struct bkey_buf *s, struct bch_fs *c)
+{
+ if (s->k != (void *) s->onstack)
+ mempool_free(s->k, &c->large_bkey_pool);
+ s->k = NULL;
+}
+
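+/*
+ * Intended lifecycle (sketch): keys usually fit in the on-stack buffer;
+ * bch2_bkey_buf_realloc() falls back to the large-bkey mempool for oversized
+ * keys, and bch2_bkey_buf_exit() returns the allocation:
+ *
+ *	struct bkey_buf buf;
+ *
+ *	bch2_bkey_buf_init(&buf);
+ *	bch2_bkey_buf_reassemble(&buf, c, k);
+ *	... use buf.k ...
+ *	bch2_bkey_buf_exit(&buf, c);
+ */
+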
+#endif /* _BCACHEFS_BKEY_BUF_H */
diff --git a/fs/bcachefs/bkey_cmp.h b/fs/bcachefs/bkey_cmp.h
new file mode 100644
index 000000000000..5f42a6e69360
--- /dev/null
+++ b/fs/bcachefs/bkey_cmp.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BKEY_CMP_H
+#define _BCACHEFS_BKEY_CMP_H
+
+#include "bkey.h"
+
+#ifdef CONFIG_X86_64
+static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
+ unsigned nr_key_bits)
+{
+ long d0, d1, d2, d3;
+ int cmp;
+
+	/* we shouldn't need asm for this, but gcc generates terrible code: */
+
+ asm(".intel_syntax noprefix;"
+ "xor eax, eax;"
+ "xor edx, edx;"
+ "1:;"
+ "mov r8, [rdi];"
+ "mov r9, [rsi];"
+ "sub ecx, 64;"
+ "jl 2f;"
+
+ "cmp r8, r9;"
+ "jnz 3f;"
+
+ "lea rdi, [rdi - 8];"
+ "lea rsi, [rsi - 8];"
+ "jmp 1b;"
+
+ "2:;"
+ "not ecx;"
+ "shr r8, 1;"
+ "shr r9, 1;"
+ "shr r8, cl;"
+ "shr r9, cl;"
+ "cmp r8, r9;"
+
+ "3:\n"
+ "seta al;"
+ "setb dl;"
+ "sub eax, edx;"
+ ".att_syntax prefix;"
+ : "=&D" (d0), "=&S" (d1), "=&d" (d2), "=&c" (d3), "=&a" (cmp)
+ : "0" (l), "1" (r), "3" (nr_key_bits)
+ : "r8", "r9", "cc", "memory");
+
+ return cmp;
+}
+#else
+static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
+ unsigned nr_key_bits)
+{
+ u64 l_v, r_v;
+
+ if (!nr_key_bits)
+ return 0;
+
+ /* for big endian, skip past header */
+ nr_key_bits += high_bit_offset;
+ l_v = *l & (~0ULL >> high_bit_offset);
+ r_v = *r & (~0ULL >> high_bit_offset);
+
+ while (1) {
+ if (nr_key_bits < 64) {
+ l_v >>= 64 - nr_key_bits;
+ r_v >>= 64 - nr_key_bits;
+ nr_key_bits = 0;
+ } else {
+ nr_key_bits -= 64;
+ }
+
+ if (!nr_key_bits || l_v != r_v)
+ break;
+
+ l = next_word(l);
+ r = next_word(r);
+
+ l_v = *l;
+ r_v = *r;
+ }
+
+ return cmp_int(l_v, r_v);
+}
+#endif
+
+static inline __pure __flatten
+int __bch2_bkey_cmp_packed_format_checked_inlined(const struct bkey_packed *l,
+ const struct bkey_packed *r,
+ const struct btree *b)
+{
+ const struct bkey_format *f = &b->format;
+ int ret;
+
+ EBUG_ON(!bkey_packed(l) || !bkey_packed(r));
+ EBUG_ON(b->nr_key_bits != bkey_format_key_bits(f));
+
+ ret = __bkey_cmp_bits(high_word(f, l),
+ high_word(f, r),
+ b->nr_key_bits);
+
+ EBUG_ON(ret != bpos_cmp(bkey_unpack_pos(b, l),
+ bkey_unpack_pos(b, r)));
+ return ret;
+}
+
+static inline __pure __flatten
+int bch2_bkey_cmp_packed_inlined(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r)
+{
+ struct bkey unpacked;
+
+ if (likely(bkey_packed(l) && bkey_packed(r)))
+ return __bch2_bkey_cmp_packed_format_checked_inlined(l, r, b);
+
+ if (bkey_packed(l)) {
+ __bkey_unpack_key_format_checked(b, &unpacked, l);
+ l = (void *) &unpacked;
+ } else if (bkey_packed(r)) {
+ __bkey_unpack_key_format_checked(b, &unpacked, r);
+ r = (void *) &unpacked;
+ }
+
+ return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
+}
+
+#endif /* _BCACHEFS_BKEY_CMP_H */
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
new file mode 100644
index 000000000000..d9fb1fc81f1e
--- /dev/null
+++ b/fs/bcachefs/bkey_methods.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "backpointers.h"
+#include "bkey_methods.h"
+#include "btree_types.h"
+#include "alloc_background.h"
+#include "dirent.h"
+#include "ec.h"
+#include "error.h"
+#include "extents.h"
+#include "inode.h"
+#include "io_misc.h"
+#include "lru.h"
+#include "quota.h"
+#include "reflink.h"
+#include "snapshot.h"
+#include "subvolume.h"
+#include "xattr.h"
+
+const char * const bch2_bkey_types[] = {
+#define x(name, nr) #name,
+ BCH_BKEY_TYPES()
+#undef x
+ NULL
+};
+
+static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+ return 0;
+}
+
+#define bch2_bkey_ops_deleted ((struct bkey_ops) { \
+ .key_invalid = deleted_key_invalid, \
+})
+
+#define bch2_bkey_ops_whiteout ((struct bkey_ops) { \
+ .key_invalid = deleted_key_invalid, \
+})
+
+static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+ if (bkey_val_bytes(k.k)) {
+ prt_printf(err, "incorrect value size (%zu != 0)",
+ bkey_val_bytes(k.k));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+#define bch2_bkey_ops_error ((struct bkey_ops) { \
+ .key_invalid = empty_val_key_invalid, \
+})
+
+static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+ return 0;
+}
+
+#define bch2_bkey_ops_cookie ((struct bkey_ops) { \
+ .key_invalid = key_type_cookie_invalid, \
+ .min_val_size = 8, \
+})
+
+#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
+ .key_invalid = empty_val_key_invalid, \
+})
+
+static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+ return 0;
+}
+
+static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
+ unsigned datalen = bkey_inline_data_bytes(k.k);
+
+ prt_printf(out, "datalen %u: %*phN",
+ datalen, min(datalen, 32U), d.v->data);
+}
+
+#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
+ .key_invalid = key_type_inline_data_invalid, \
+ .val_to_text = key_type_inline_data_to_text, \
+})
+
+static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+	if (bkey_val_bytes(k.k)) {
+		prt_printf(err, "incorrect value size (%zu != 0)",
+			   bkey_val_bytes(k.k));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
+{
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+ return true;
+}
+
+#define bch2_bkey_ops_set ((struct bkey_ops) { \
+ .key_invalid = key_type_set_invalid, \
+ .key_merge = key_type_set_merge, \
+})
+
+const struct bkey_ops bch2_bkey_ops[] = {
+#define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name,
+ BCH_BKEY_TYPES()
+#undef x
+};
+
+const struct bkey_ops bch2_bkey_null_ops = {
+};
+
+int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
+
+ if (bkey_val_bytes(k.k) < ops->min_val_size) {
+ prt_printf(err, "bad val size (%zu < %u)",
+ bkey_val_bytes(k.k), ops->min_val_size);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (!ops->key_invalid)
+ return 0;
+
+ return ops->key_invalid(c, k, flags, err);
+}
+
+static u64 bch2_key_types_allowed[] = {
+#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
+ BCH_BTREE_IDS()
+#undef x
+ [BKEY_TYPE_btree] =
+ BIT_ULL(KEY_TYPE_deleted)|
+ BIT_ULL(KEY_TYPE_btree_ptr)|
+ BIT_ULL(KEY_TYPE_btree_ptr_v2),
+};
+
+int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
+ enum btree_node_type type,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (k.k->u64s < BKEY_U64s) {
+ prt_printf(err, "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (flags & BKEY_INVALID_COMMIT &&
+ !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) {
+ prt_printf(err, "invalid key type for btree %s (%s)",
+ bch2_btree_ids[type], bch2_bkey_types[k.k->type]);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
+ if (k.k->size == 0) {
+ prt_printf(err, "size == 0");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (k.k->size > k.k->p.offset) {
+ prt_printf(err, "size greater than offset (%u > %llu)",
+ k.k->size, k.k->p.offset);
+ return -BCH_ERR_invalid_bkey;
+ }
+ } else {
+ if (k.k->size) {
+ prt_printf(err, "size != 0");
+ return -BCH_ERR_invalid_bkey;
+ }
+ }
+
+ if (type != BKEY_TYPE_btree) {
+ if (!btree_type_has_snapshots((enum btree_id) type) &&
+ k.k->p.snapshot) {
+ prt_printf(err, "nonzero snapshot");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (btree_type_has_snapshots((enum btree_id) type) &&
+ !k.k->p.snapshot) {
+ prt_printf(err, "snapshot == 0");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bkey_eq(k.k->p, POS_MAX)) {
+ prt_printf(err, "key at POS_MAX");
+ return -BCH_ERR_invalid_bkey;
+ }
+ }
+
+ return 0;
+}
+
+int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
+ enum btree_node_type type,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ return __bch2_bkey_invalid(c, k, type, flags, err) ?:
+ bch2_bkey_val_invalid(c, k, flags, err);
+}
+
+int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
+ struct printbuf *err)
+{
+ if (bpos_lt(k.k->p, b->data->min_key)) {
+ prt_printf(err, "key before start of btree node");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bpos_gt(k.k->p, b->data->max_key)) {
+ prt_printf(err, "key past end of btree node");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
+{
+ if (bpos_eq(pos, POS_MIN))
+ prt_printf(out, "POS_MIN");
+ else if (bpos_eq(pos, POS_MAX))
+ prt_printf(out, "POS_MAX");
+ else if (bpos_eq(pos, SPOS_MAX))
+ prt_printf(out, "SPOS_MAX");
+ else {
+ if (pos.inode == U64_MAX)
+ prt_printf(out, "U64_MAX");
+ else
+ prt_printf(out, "%llu", pos.inode);
+ prt_printf(out, ":");
+ if (pos.offset == U64_MAX)
+ prt_printf(out, "U64_MAX");
+ else
+ prt_printf(out, "%llu", pos.offset);
+ prt_printf(out, ":");
+ if (pos.snapshot == U32_MAX)
+ prt_printf(out, "U32_MAX");
+ else
+ prt_printf(out, "%u", pos.snapshot);
+ }
+}
+
+void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
+{
+ if (k) {
+ prt_printf(out, "u64s %u type ", k->u64s);
+
+ if (k->type < KEY_TYPE_MAX)
+ prt_printf(out, "%s ", bch2_bkey_types[k->type]);
+ else
+ prt_printf(out, "%u ", k->type);
+
+ bch2_bpos_to_text(out, k->p);
+
+ prt_printf(out, " len %u ver %llu", k->size, k->version.lo);
+ } else {
+ prt_printf(out, "(null)");
+ }
+}
+
+void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
+
+ if (likely(ops->val_to_text))
+ ops->val_to_text(out, c, k);
+}
+
+void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_to_text(out, k.k);
+
+ if (bkey_val_bytes(k.k)) {
+ prt_printf(out, ": ");
+ bch2_val_to_text(out, c, k);
+ }
+}
+
+void bch2_bkey_swab_val(struct bkey_s k)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
+
+ if (ops->swab)
+ ops->swab(k);
+}
+
+bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
+
+ return ops->key_normalize
+ ? ops->key_normalize(c, k)
+ : false;
+}
+
+bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(l.k->type);
+
+ return ops->key_merge &&
+ bch2_bkey_maybe_mergable(l.k, r.k) &&
+ (u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
+ !bch2_key_merging_disabled &&
+ ops->key_merge(c, l, r);
+}
+
+static const struct old_bkey_type {
+ u8 btree_node_type;
+ u8 old;
+ u8 new;
+} bkey_renumber_table[] = {
+ {BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr },
+ {BKEY_TYPE_extents, 128, KEY_TYPE_extent },
+ {BKEY_TYPE_extents, 129, KEY_TYPE_extent },
+ {BKEY_TYPE_extents, 130, KEY_TYPE_reservation },
+ {BKEY_TYPE_inodes, 128, KEY_TYPE_inode },
+ {BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation },
+ {BKEY_TYPE_dirents, 128, KEY_TYPE_dirent },
+ {BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout },
+ {BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr },
+ {BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout },
+ {BKEY_TYPE_alloc, 128, KEY_TYPE_alloc },
+ {BKEY_TYPE_quotas, 128, KEY_TYPE_quota },
+};
+
+void bch2_bkey_renumber(enum btree_node_type btree_node_type,
+ struct bkey_packed *k,
+ int write)
+{
+ const struct old_bkey_type *i;
+
+ for (i = bkey_renumber_table;
+ i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table);
+ i++)
+ if (btree_node_type == i->btree_node_type &&
+ k->type == (write ? i->new : i->old)) {
+ k->type = write ? i->old : i->new;
+ break;
+ }
+}
+
+void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
+ unsigned version, unsigned big_endian,
+ int write,
+ struct bkey_format *f,
+ struct bkey_packed *k)
+{
+ const struct bkey_ops *ops;
+ struct bkey uk;
+ unsigned nr_compat = 5;
+ int i;
+
+ /*
+ * Do these operations in reverse order in the write path:
+ */
+
+ for (i = 0; i < nr_compat; i++)
+ switch (!write ? i : nr_compat - 1 - i) {
+ case 0:
+ if (big_endian != CPU_BIG_ENDIAN)
+ bch2_bkey_swab_key(f, k);
+ break;
+ case 1:
+ if (version < bcachefs_metadata_version_bkey_renumber)
+ bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write);
+ break;
+ case 2:
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id == BTREE_ID_inodes) {
+ if (!bkey_packed(k)) {
+ struct bkey_i *u = packed_to_bkey(k);
+
+ swap(u->k.p.inode, u->k.p.offset);
+ } else if (f->bits_per_field[BKEY_FIELD_INODE] &&
+ f->bits_per_field[BKEY_FIELD_OFFSET]) {
+ struct bkey_format tmp = *f, *in = f, *out = &tmp;
+
+ swap(tmp.bits_per_field[BKEY_FIELD_INODE],
+ tmp.bits_per_field[BKEY_FIELD_OFFSET]);
+ swap(tmp.field_offset[BKEY_FIELD_INODE],
+ tmp.field_offset[BKEY_FIELD_OFFSET]);
+
+ if (!write)
+ swap(in, out);
+
+ uk = __bch2_bkey_unpack_key(in, k);
+ swap(uk.p.inode, uk.p.offset);
+ BUG_ON(!bch2_bkey_pack_key(k, &uk, out));
+ }
+ }
+ break;
+ case 3:
+ if (version < bcachefs_metadata_version_snapshot &&
+ (level || btree_type_has_snapshots(btree_id))) {
+ struct bkey_i *u = packed_to_bkey(k);
+
+ if (u) {
+ u->k.p.snapshot = write
+ ? 0 : U32_MAX;
+ } else {
+ u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]);
+ u64 max_packed = min_packed +
+ ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
+
+ uk = __bch2_bkey_unpack_key(f, k);
+ uk.p.snapshot = write
+ ? min_packed : min_t(u64, U32_MAX, max_packed);
+
+ BUG_ON(!bch2_bkey_pack_key(k, &uk, f));
+ }
+ }
+
+ break;
+ case 4: {
+ struct bkey_s u;
+
+ if (!bkey_packed(k)) {
+ u = bkey_i_to_s(packed_to_bkey(k));
+ } else {
+ uk = __bch2_bkey_unpack_key(f, k);
+ u.k = &uk;
+ u.v = bkeyp_val(f, k);
+ }
+
+ if (big_endian != CPU_BIG_ENDIAN)
+ bch2_bkey_swab_val(u);
+
+ ops = bch2_bkey_type_ops(k->type);
+
+ if (ops->compat)
+ ops->compat(btree_id, version, big_endian, write, u);
+ break;
+ }
+ default:
+ BUG();
+ }
+}
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
new file mode 100644
index 000000000000..668f595e2fcf
--- /dev/null
+++ b/fs/bcachefs/bkey_methods.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BKEY_METHODS_H
+#define _BCACHEFS_BKEY_METHODS_H
+
+#include "bkey.h"
+
+struct bch_fs;
+struct btree;
+struct btree_trans;
+struct bkey;
+enum btree_node_type;
+
+extern const char * const bch2_bkey_types[];
+extern const struct bkey_ops bch2_bkey_null_ops;
+
+/*
+ * key_invalid: checks validity of @k, returns 0 if good or
+ * -BCH_ERR_invalid_bkey if bad. If invalid, the entire key will be deleted.
+ *
+ * When invalid, an error string is returned via @err. @flags indicates the
+ * context the key is being checked in; more aggressive checks are enabled
+ * when BKEY_INVALID_COMMIT is set.
+ */
+struct bkey_ops {
+ int (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err);
+ void (*val_to_text)(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+ void (*swab)(struct bkey_s);
+ bool (*key_normalize)(struct bch_fs *, struct bkey_s);
+ bool (*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+ int (*trans_trigger)(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_i *, unsigned);
+ int (*atomic_trigger)(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+ void (*compat)(enum btree_id id, unsigned version,
+ unsigned big_endian, int write,
+ struct bkey_s);
+
+ /* Size of value type when first created: */
+ unsigned min_val_size;
+};
+
+extern const struct bkey_ops bch2_bkey_ops[];
+
+static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type)
+{
+ return likely(type < KEY_TYPE_MAX)
+ ? &bch2_bkey_ops[type]
+ : &bch2_bkey_null_ops;
+}
+
+int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c, struct printbuf *);
+
+void bch2_bpos_to_text(struct printbuf *, struct bpos);
+void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
+void bch2_val_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+void bch2_bkey_val_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+
+void bch2_bkey_swab_val(struct bkey_s);
+
+bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s);
+
+static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r)
+{
+ return l->type == r->type &&
+ !bversion_cmp(l->version, r->version) &&
+ bpos_eq(l->p, bkey_start_pos(r));
+}
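+
+/*
+ * Example (sketch): two keys are merge candidates when the first ends exactly
+ * where the second starts, with matching type and version - e.g. extents
+ * covering [0, 8) and [8, 16) may merge into one covering [0, 16), which is
+ * what key_merge implementations like key_type_set_merge() do by resizing l.
+ */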
+
+bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+
+static inline int bch2_mark_key(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new.k->type);
+
+ return ops->atomic_trigger
+ ? ops->atomic_trigger(trans, btree, level, old, new, flags)
+ : 0;
+}
+
+enum btree_update_flags {
+ __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
+ __BTREE_UPDATE_NOJOURNAL,
+ __BTREE_UPDATE_PREJOURNAL,
+ __BTREE_UPDATE_KEY_CACHE_RECLAIM,
+
+ __BTREE_TRIGGER_NORUN, /* Don't run triggers at all */
+
+ __BTREE_TRIGGER_INSERT,
+ __BTREE_TRIGGER_OVERWRITE,
+
+ __BTREE_TRIGGER_GC,
+ __BTREE_TRIGGER_BUCKET_INVALIDATE,
+ __BTREE_TRIGGER_NOATOMIC,
+};
+
+#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
+#define BTREE_UPDATE_NOJOURNAL (1U << __BTREE_UPDATE_NOJOURNAL)
+#define BTREE_UPDATE_PREJOURNAL (1U << __BTREE_UPDATE_PREJOURNAL)
+#define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
+
+#define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
+
+#define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)
+#define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE)
+
+#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
+#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
+#define BTREE_TRIGGER_NOATOMIC (1U << __BTREE_TRIGGER_NOATOMIC)
+
+#define BTREE_TRIGGER_WANTS_OLD_AND_NEW \
+ ((1U << KEY_TYPE_alloc)| \
+ (1U << KEY_TYPE_alloc_v2)| \
+ (1U << KEY_TYPE_alloc_v3)| \
+ (1U << KEY_TYPE_alloc_v4)| \
+ (1U << KEY_TYPE_stripe)| \
+ (1U << KEY_TYPE_inode)| \
+ (1U << KEY_TYPE_inode_v2)| \
+ (1U << KEY_TYPE_snapshot))
+
+static inline int bch2_trans_mark_key(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new->k.type);
+
+ return ops->trans_trigger
+ ? ops->trans_trigger(trans, btree_id, level, old, new, flags)
+ : 0;
+}
+
+static inline int bch2_trans_mark_old(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, unsigned flags)
+{
+ struct bkey_i deleted;
+
+ bkey_init(&deleted.k);
+ deleted.k.p = old.k->p;
+
+ return bch2_trans_mark_key(trans, btree_id, level, old, &deleted,
+ BTREE_TRIGGER_OVERWRITE|flags);
+}
+
+static inline int bch2_trans_mark_new(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_i *new, unsigned flags)
+{
+ struct bkey_i deleted;
+
+ bkey_init(&deleted.k);
+ deleted.k.p = new->k.p;
+
+ return bch2_trans_mark_key(trans, btree_id, level, bkey_i_to_s_c(&deleted), new,
+ BTREE_TRIGGER_INSERT|flags);
+}
+
+void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
+
+void __bch2_bkey_compat(unsigned, enum btree_id, unsigned, unsigned,
+ int, struct bkey_format *, struct bkey_packed *);
+
+static inline void bch2_bkey_compat(unsigned level, enum btree_id btree_id,
+ unsigned version, unsigned big_endian,
+ int write,
+ struct bkey_format *f,
+ struct bkey_packed *k)
+{
+ if (version < bcachefs_metadata_version_current ||
+ big_endian != CPU_BIG_ENDIAN)
+ __bch2_bkey_compat(level, btree_id, version,
+ big_endian, write, f, k);
+}
+
+#endif /* _BCACHEFS_BKEY_METHODS_H */
diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c
new file mode 100644
index 000000000000..b9aa027c881b
--- /dev/null
+++ b/fs/bcachefs/bkey_sort.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "bkey_cmp.h"
+#include "bkey_sort.h"
+#include "bset.h"
+#include "extents.h"
+
+typedef int (*sort_cmp_fn)(struct btree *,
+ struct bkey_packed *,
+ struct bkey_packed *);
+
+static inline bool sort_iter_end(struct sort_iter *iter)
+{
+ return !iter->used;
+}
+
+static inline void sort_iter_sift(struct sort_iter *iter, unsigned from,
+ sort_cmp_fn cmp)
+{
+ unsigned i;
+
+ for (i = from;
+ i + 1 < iter->used &&
+ cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
+ i++)
+ swap(iter->data[i], iter->data[i + 1]);
+}
+
+static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
+{
+ unsigned i = iter->used;
+
+ while (i--)
+ sort_iter_sift(iter, i, cmp);
+}
+
+static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
+{
+ return !sort_iter_end(iter) ? iter->data->k : NULL;
+}
+
+static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
+{
+ struct sort_iter_set *i = iter->data;
+
+ BUG_ON(!iter->used);
+
+ i->k = bkey_p_next(i->k);
+
+ BUG_ON(i->k > i->end);
+
+ if (i->k == i->end)
+ array_remove_item(iter->data, iter->used, 0);
+ else
+ sort_iter_sift(iter, 0, cmp);
+}
+
+static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
+ sort_cmp_fn cmp)
+{
+ struct bkey_packed *ret = sort_iter_peek(iter);
+
+ if (ret)
+ sort_iter_advance(iter, cmp);
+
+ return ret;
+}
+
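+/*
+ * Usage sketch: callers load one or more sorted ranges with sort_iter_add()
+ * (declared in bkey_sort.h), then drain them in merged order:
+ *
+ *	sort_iter_sort(iter, cmp);
+ *	while ((k = sort_iter_next(iter, cmp)))
+ *		process(k);
+ *
+ * (process() is a hypothetical consumer.)
+ */
+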
+/*
+ * If keys compare equal, compare by pointer order:
+ */
+static inline int key_sort_fix_overlapping_cmp(struct btree *b,
+ struct bkey_packed *l,
+ struct bkey_packed *r)
+{
+ return bch2_bkey_cmp_packed(b, l, r) ?:
+ cmp_int((unsigned long) l, (unsigned long) r);
+}
+
+static inline bool should_drop_next_key(struct sort_iter *iter)
+{
+	/*
+	 * key_sort_fix_overlapping_cmp() ensures that when keys compare equal
+	 * the older key comes first; so if the first two keys in the iterator
+	 * compare equal, the first (older) one should be dropped.
+	 */
+ return iter->used >= 2 &&
+ !bch2_bkey_cmp_packed(iter->b,
+ iter->data[0].k,
+ iter->data[1].k);
+}
+
+struct btree_nr_keys
+bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
+ struct sort_iter *iter)
+{
+ struct bkey_packed *out = dst->start;
+ struct bkey_packed *k;
+ struct btree_nr_keys nr;
+
+ memset(&nr, 0, sizeof(nr));
+
+ sort_iter_sort(iter, key_sort_fix_overlapping_cmp);
+
+ while ((k = sort_iter_peek(iter))) {
+ if (!bkey_deleted(k) &&
+ !should_drop_next_key(iter)) {
+ bkey_copy(out, k);
+ btree_keys_account_key_add(&nr, 0, out);
+ out = bkey_p_next(out);
+ }
+
+ sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
+ }
+
+ dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
+ return nr;
+}
+
+/* Sort + repack in a new format: */
+struct btree_nr_keys
+bch2_sort_repack(struct bset *dst, struct btree *src,
+ struct btree_node_iter *src_iter,
+ struct bkey_format *out_f,
+ bool filter_whiteouts)
+{
+ struct bkey_format *in_f = &src->format;
+ struct bkey_packed *in, *out = vstruct_last(dst);
+ struct btree_nr_keys nr;
+ bool transform = memcmp(out_f, &src->format, sizeof(*out_f));
+
+ memset(&nr, 0, sizeof(nr));
+
+ while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
+ if (filter_whiteouts && bkey_deleted(in))
+ continue;
+
+ if (!transform)
+ bkey_copy(out, in);
+ else if (bch2_bkey_transform(out_f, out, bkey_packed(in)
+ ? in_f : &bch2_bkey_format_current, in))
+ out->format = KEY_FORMAT_LOCAL_BTREE;
+ else
+ bch2_bkey_unpack(src, (void *) out, in);
+
+ out->needs_whiteout = false;
+
+ btree_keys_account_key_add(&nr, 0, out);
+ out = bkey_p_next(out);
+ }
+
+ dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
+ return nr;
+}
+
+static inline int sort_keys_cmp(struct btree *b,
+ struct bkey_packed *l,
+ struct bkey_packed *r)
+{
+ return bch2_bkey_cmp_packed_inlined(b, l, r) ?:
+ (int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
+ (int) l->needs_whiteout - (int) r->needs_whiteout;
+}
+
+unsigned bch2_sort_keys(struct bkey_packed *dst,
+ struct sort_iter *iter,
+ bool filter_whiteouts)
+{
+ const struct bkey_format *f = &iter->b->format;
+ struct bkey_packed *in, *next, *out = dst;
+
+ sort_iter_sort(iter, sort_keys_cmp);
+
+ while ((in = sort_iter_next(iter, sort_keys_cmp))) {
+ bool needs_whiteout = false;
+
+ if (bkey_deleted(in) &&
+ (filter_whiteouts || !in->needs_whiteout))
+ continue;
+
+ while ((next = sort_iter_peek(iter)) &&
+ !bch2_bkey_cmp_packed_inlined(iter->b, in, next)) {
+ BUG_ON(in->needs_whiteout &&
+ next->needs_whiteout);
+ needs_whiteout |= in->needs_whiteout;
+ in = sort_iter_next(iter, sort_keys_cmp);
+ }
+
+ if (bkey_deleted(in)) {
+ memcpy_u64s_small(out, in, bkeyp_key_u64s(f, in));
+ set_bkeyp_val_u64s(f, out, 0);
+ } else {
+ bkey_copy(out, in);
+ }
+ out->needs_whiteout |= needs_whiteout;
+ out = bkey_p_next(out);
+ }
+
+ return (u64 *) out - (u64 *) dst;
+}
diff --git a/fs/bcachefs/bkey_sort.h b/fs/bcachefs/bkey_sort.h
new file mode 100644
index 000000000000..7c0f0b160f18
--- /dev/null
+++ b/fs/bcachefs/bkey_sort.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BKEY_SORT_H
+#define _BCACHEFS_BKEY_SORT_H
+
+struct sort_iter {
+ struct btree *b;
+ unsigned used;
+ unsigned size;
+
+ struct sort_iter_set {
+ struct bkey_packed *k, *end;
+ } data[];
+};
+
+static inline void sort_iter_init(struct sort_iter *iter, struct btree *b, unsigned size)
+{
+ iter->b = b;
+ iter->used = 0;
+ iter->size = size;
+}
+
+struct sort_iter_stack {
+ struct sort_iter iter;
+ struct sort_iter_set sets[MAX_BSETS + 1];
+};
+
+static inline void sort_iter_stack_init(struct sort_iter_stack *iter, struct btree *b)
+{
+ sort_iter_init(&iter->iter, b, ARRAY_SIZE(iter->sets));
+}
+
+static inline void sort_iter_add(struct sort_iter *iter,
+ struct bkey_packed *k,
+ struct bkey_packed *end)
+{
+ BUG_ON(iter->used >= iter->size);
+
+ if (k != end)
+ iter->data[iter->used++] = (struct sort_iter_set) { k, end };
+}
+
+struct btree_nr_keys
+bch2_key_sort_fix_overlapping(struct bch_fs *, struct bset *,
+ struct sort_iter *);
+
+struct btree_nr_keys
+bch2_sort_repack(struct bset *, struct btree *,
+ struct btree_node_iter *,
+ struct bkey_format *, bool);
+
+unsigned bch2_sort_keys(struct bkey_packed *,
+ struct sort_iter *, bool);
+
+#endif /* _BCACHEFS_BKEY_SORT_H */
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
new file mode 100644
index 000000000000..bb73ba9017b0
--- /dev/null
+++ b/fs/bcachefs/bset.c
@@ -0,0 +1,1592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for working with individual keys, and sorted sets of keys within a
+ * btree node
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "btree_cache.h"
+#include "bset.h"
+#include "eytzinger.h"
+#include "trace.h"
+#include "util.h"
+
+#include <asm/unaligned.h>
+#include <linux/console.h>
+#include <linux/random.h>
+#include <linux/prefetch.h>
+
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
+ struct btree *);
+
+static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
+{
+ unsigned n = ARRAY_SIZE(iter->data);
+
+ while (n && __btree_node_iter_set_end(iter, n - 1))
+ --n;
+
+ return n;
+}
+
+struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
+{
+ return bch2_bkey_to_bset_inlined(b, k);
+}
+
+/*
+ * There are never duplicate live keys in the btree - but including keys that
+ * have been flagged as deleted (and will be cleaned up later) we _will_ see
+ * duplicates.
+ *
+ * Thus the sort order is: usual key comparison first, but for keys that compare
+ * equal the deleted key(s) come first, and the (at most one) live version comes
+ * last.
+ *
+ * The main reason for this is insertion: to handle overwrites, we first iterate
+ * over keys that compare equal to our insert key, and then insert immediately
+ * prior to the first key greater than the key we're inserting - our insert
+ * position will be after all keys that compare equal to our insert key, which
+ * by the time we actually do the insert will all be deleted.
+ */
+
+void bch2_dump_bset(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned set)
+{
+ struct bkey_packed *_k, *_n;
+ struct bkey uk, n;
+ struct bkey_s_c k;
+ struct printbuf buf = PRINTBUF;
+
+ if (!i->u64s)
+ return;
+
+ for (_k = i->start;
+ _k < vstruct_last(i);
+ _k = _n) {
+ _n = bkey_p_next(_k);
+
+ k = bkey_disassemble(b, _k, &uk);
+
+ printbuf_reset(&buf);
+ if (c)
+ bch2_bkey_val_to_text(&buf, c, k);
+ else
+ bch2_bkey_to_text(&buf, k.k);
+ printk(KERN_ERR "block %u key %5zu: %s\n", set,
+ _k->_data - i->_data, buf.buf);
+
+ if (_n == vstruct_last(i))
+ continue;
+
+ n = bkey_unpack_key(b, _n);
+
+ if (bpos_lt(n.p, k.k->p)) {
+ printk(KERN_ERR "Key skipped backwards\n");
+ continue;
+ }
+
+ if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
+ printk(KERN_ERR "Duplicate keys\n");
+ }
+
+ printbuf_exit(&buf);
+}
+
+void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
+{
+ struct bset_tree *t;
+
+ console_lock();
+ for_each_bset(b, t)
+ bch2_dump_bset(c, b, bset(b, t), t - b->set);
+ console_unlock();
+}
+
+void bch2_dump_btree_node_iter(struct btree *b,
+ struct btree_node_iter *iter)
+{
+ struct btree_node_iter_set *set;
+ struct printbuf buf = PRINTBUF;
+
+ printk(KERN_ERR "btree node iter with %u/%u sets:\n",
+ __btree_node_iter_used(iter), b->nsets);
+
+ btree_node_iter_for_each(iter, set) {
+ struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
+ struct bset_tree *t = bch2_bkey_to_bset(b, k);
+ struct bkey uk = bkey_unpack_key(b, k);
+
+ printbuf_reset(&buf);
+ bch2_bkey_to_text(&buf, &uk);
+ printk(KERN_ERR "set %zu key %u: %s\n",
+ t - b->set, set->k, buf.buf);
+ }
+
+ printbuf_exit(&buf);
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void __bch2_verify_btree_nr_keys(struct btree *b)
+{
+ struct bset_tree *t;
+ struct bkey_packed *k;
+ struct btree_nr_keys nr = { 0 };
+
+ for_each_bset(b, t)
+ bset_tree_for_each_key(b, t, k)
+ if (!bkey_deleted(k))
+ btree_keys_account_key_add(&nr, t - b->set, k);
+
+ BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
+}
+
+static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
+ struct btree *b)
+{
+ struct btree_node_iter iter = *_iter;
+ const struct bkey_packed *k, *n;
+
+ k = bch2_btree_node_iter_peek_all(&iter, b);
+ __bch2_btree_node_iter_advance(&iter, b);
+ n = bch2_btree_node_iter_peek_all(&iter, b);
+
+ bkey_unpack_key(b, k);
+
+ if (n &&
+ bkey_iter_cmp(b, k, n) > 0) {
+ struct btree_node_iter_set *set;
+ struct bkey ku = bkey_unpack_key(b, k);
+ struct bkey nu = bkey_unpack_key(b, n);
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+
+ bch2_dump_btree_node(NULL, b);
+ bch2_bkey_to_text(&buf1, &ku);
+ bch2_bkey_to_text(&buf2, &nu);
+ printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
+ buf1.buf, buf2.buf);
+ printk(KERN_ERR "iter was:");
+
+ btree_node_iter_for_each(_iter, set) {
+ struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
+ struct bset_tree *t = bch2_bkey_to_bset(b, k2);
+ printk(" [%zi %zi]", t - b->set,
+ k2->_data - bset(b, t)->_data);
+ }
+ panic("\n");
+ }
+}
+
+void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ struct btree_node_iter_set *set, *s2;
+ struct bkey_packed *k, *p;
+ struct bset_tree *t;
+
+ if (bch2_btree_node_iter_end(iter))
+ return;
+
+ /* Verify no duplicates: */
+ btree_node_iter_for_each(iter, set) {
+ BUG_ON(set->k > set->end);
+ btree_node_iter_for_each(iter, s2)
+ BUG_ON(set != s2 && set->end == s2->end);
+ }
+
+ /* Verify that set->end is correct: */
+ btree_node_iter_for_each(iter, set) {
+ for_each_bset(b, t)
+ if (set->end == t->end_offset)
+ goto found;
+ BUG();
+found:
+ BUG_ON(set->k < btree_bkey_first_offset(t) ||
+ set->k >= t->end_offset);
+ }
+
+ /* Verify iterator is sorted: */
+ btree_node_iter_for_each(iter, set)
+ BUG_ON(set != iter->data &&
+ btree_node_iter_cmp(b, set[-1], set[0]) > 0);
+
+ k = bch2_btree_node_iter_peek_all(iter, b);
+
+ for_each_bset(b, t) {
+ if (iter->data[0].end == t->end_offset)
+ continue;
+
+ p = bch2_bkey_prev_all(b, t,
+ bch2_btree_node_iter_bset_pos(iter, b, t));
+
+ BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
+ }
+}
+
+void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
+ struct bkey_packed *insert, unsigned clobber_u64s)
+{
+ struct bset_tree *t = bch2_bkey_to_bset(b, where);
+ struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
+ struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+#if 0
+ BUG_ON(prev &&
+ bkey_iter_cmp(b, prev, insert) > 0);
+#else
+ if (prev &&
+ bkey_iter_cmp(b, prev, insert) > 0) {
+ struct bkey k1 = bkey_unpack_key(b, prev);
+ struct bkey k2 = bkey_unpack_key(b, insert);
+
+ bch2_dump_btree_node(NULL, b);
+ bch2_bkey_to_text(&buf1, &k1);
+ bch2_bkey_to_text(&buf2, &k2);
+
+ panic("prev > insert:\n"
+ "prev key %s\n"
+ "insert key %s\n",
+ buf1.buf, buf2.buf);
+ }
+#endif
+#if 0
+ BUG_ON(next != btree_bkey_last(b, t) &&
+ bkey_iter_cmp(b, insert, next) > 0);
+#else
+ if (next != btree_bkey_last(b, t) &&
+ bkey_iter_cmp(b, insert, next) > 0) {
+ struct bkey k1 = bkey_unpack_key(b, insert);
+ struct bkey k2 = bkey_unpack_key(b, next);
+
+ bch2_dump_btree_node(NULL, b);
+ bch2_bkey_to_text(&buf1, &k1);
+ bch2_bkey_to_text(&buf2, &k2);
+
+ panic("insert > next:\n"
+ "insert key %s\n"
+ "next key %s\n",
+ buf1.buf, buf2.buf);
+ }
+#endif
+}
+
+#else
+
+static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
+ struct btree *b) {}
+
+#endif
+
+/* Auxiliary search trees */
+
+#define BFLOAT_FAILED_UNPACKED U8_MAX
+#define BFLOAT_FAILED U8_MAX
+
+struct bkey_float {
+ u8 exponent;
+ u8 key_offset;
+ u16 mantissa;
+};
+#define BKEY_MANTISSA_BITS 16
+
+static unsigned bkey_float_byte_offset(unsigned idx)
+{
+ return idx * sizeof(struct bkey_float);
+}
+
+struct ro_aux_tree {
+ u8 nothing[0];
+ struct bkey_float f[];
+};
+
+struct rw_aux_tree {
+ u16 offset;
+ struct bpos k;
+};
+
+static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
+{
+ BUG_ON(t->aux_data_offset == U16_MAX);
+
+ switch (bset_aux_tree_type(t)) {
+ case BSET_NO_AUX_TREE:
+ return t->aux_data_offset;
+ case BSET_RO_AUX_TREE:
+ return t->aux_data_offset +
+ DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
+ t->size * sizeof(u8), 8);
+ case BSET_RW_AUX_TREE:
+ return t->aux_data_offset +
+ DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
+ default:
+ BUG();
+ }
+}
+
+static unsigned bset_aux_tree_buf_start(const struct btree *b,
+ const struct bset_tree *t)
+{
+ return t == b->set
+ ? DIV_ROUND_UP(b->unpack_fn_len, 8)
+ : bset_aux_tree_buf_end(t - 1);
+}
+
+static void *__aux_tree_base(const struct btree *b,
+ const struct bset_tree *t)
+{
+ return b->aux_data + t->aux_data_offset * 8;
+}
+
+static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
+ const struct bset_tree *t)
+{
+ EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
+
+ return __aux_tree_base(b, t);
+}
+
+static u8 *ro_aux_tree_prev(const struct btree *b,
+ const struct bset_tree *t)
+{
+ EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
+
+ return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
+}
+
+static struct bkey_float *bkey_float(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned idx)
+{
+ return ro_aux_tree_base(b, t)->f + idx;
+}
+
+static void bset_aux_tree_verify(const struct btree *b)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ const struct bset_tree *t;
+
+ for_each_bset(b, t) {
+ if (t->aux_data_offset == U16_MAX)
+ continue;
+
+ BUG_ON(t != b->set &&
+ t[-1].aux_data_offset == U16_MAX);
+
+ BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
+ BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
+ BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
+ }
+#endif
+}
+
+void bch2_btree_keys_init(struct btree *b)
+{
+ unsigned i;
+
+ b->nsets = 0;
+ memset(&b->nr, 0, sizeof(b->nr));
+
+ for (i = 0; i < MAX_BSETS; i++)
+ b->set[i].data_offset = U16_MAX;
+
+ bch2_bset_set_no_aux_tree(b, b->set);
+}
+
+/* Binary tree stuff for auxiliary search trees */
+
+/*
+ * Cacheline/offset <-> bkey pointer arithmetic:
+ *
+ * The ro aux tree is a binary search tree laid out in an array; each node
+ * corresponds to a key in one cacheline of the bset (BSET_CACHELINE bytes).
+ *
+ * This means we don't have to store the full index of the key that a node in
+ * the binary tree points to; __eytzinger1_to_inorder() gives us the cacheline,
+ * and then bkey_float->key_offset gives us the offset within that cacheline,
+ * in units of 8 bytes.
+ *
+ * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
+ * make this work.
+ *
+ * To construct the bfloat for an arbitrary key we need to know what the key
+ * immediately preceding it is: we have to check if the two keys differ in the
+ * bits we're going to store in bkey_float->mantissa. ro_aux_tree_prev(b, t)[j]
+ * stores the size of the previous key so we can walk backwards to it from
+ * tree_to_bkey(b, t, j)'s key.
+ */
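+
+/*
+ * Worked example (a sketch, assuming BSET_CACHELINE == 256 as defined in
+ * bset.h): cacheline 3, offset 5 resolves to
+ *
+ *	bset_cacheline(b, t, 0) + 3 * 256 + 5 * 8
+ *
+ * i.e. cacheline_to_bkey(b, t, 3, 5) points 40 bytes into the fourth
+ * cacheline of the bset.
+ */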
+
+static inline void *bset_cacheline(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned cacheline)
+{
+ return (void *) round_down((unsigned long) btree_bkey_first(b, t),
+ L1_CACHE_BYTES) +
+ cacheline * BSET_CACHELINE;
+}
+
+static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned cacheline,
+ unsigned offset)
+{
+ return bset_cacheline(b, t, cacheline) + offset * 8;
+}
+
+static unsigned bkey_to_cacheline(const struct btree *b,
+ const struct bset_tree *t,
+ const struct bkey_packed *k)
+{
+ return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
+}
+
+static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned cacheline,
+ const struct bkey_packed *k)
+{
+ return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
+}
+
+static unsigned bkey_to_cacheline_offset(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned cacheline,
+ const struct bkey_packed *k)
+{
+ size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
+
+ EBUG_ON(m > U8_MAX);
+ return m;
+}
+
+static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned j)
+{
+ return cacheline_to_bkey(b, t,
+ __eytzinger1_to_inorder(j, t->size - 1, t->extra),
+ bkey_float(b, t, j)->key_offset);
+}
+
+static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
+ const struct bset_tree *t,
+ unsigned j)
+{
+ unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
+
+ return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
+}
+
+static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
+ const struct bset_tree *t)
+{
+ EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
+
+ return __aux_tree_base(b, t);
+}
+
+/*
+ * For the write set - the one we're currently inserting keys into - we don't
+ * maintain a full search tree, we just keep a simple lookup table (the rw aux
+ * tree) with roughly one entry per cacheline of keys.
+ */
+static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
+ struct bset_tree *t,
+ unsigned j)
+{
+ return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
+}
+
+static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
+ unsigned j, struct bkey_packed *k)
+{
+ EBUG_ON(k >= btree_bkey_last(b, t));
+
+ rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
+ .offset = __btree_node_key_to_offset(b, k),
+ .k = bkey_unpack_pos(b, k),
+ };
+}
+
+static void bch2_bset_verify_rw_aux_tree(struct btree *b,
+ struct bset_tree *t)
+{
+ struct bkey_packed *k = btree_bkey_first(b, t);
+ unsigned j = 0;
+
+ if (!bch2_expensive_debug_checks)
+ return;
+
+ BUG_ON(bset_has_ro_aux_tree(t));
+
+ if (!bset_has_rw_aux_tree(t))
+ return;
+
+ BUG_ON(t->size < 1);
+ BUG_ON(rw_aux_to_bkey(b, t, j) != k);
+
+ goto start;
+ while (1) {
+ if (rw_aux_to_bkey(b, t, j) == k) {
+ BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
+ bkey_unpack_pos(b, k)));
+start:
+ if (++j == t->size)
+ break;
+
+ BUG_ON(rw_aux_tree(b, t)[j].offset <=
+ rw_aux_tree(b, t)[j - 1].offset);
+ }
+
+ k = bkey_p_next(k);
+ BUG_ON(k >= btree_bkey_last(b, t));
+ }
+}
+
+/* returns idx of first entry >= offset: */
+static unsigned rw_aux_tree_bsearch(struct btree *b,
+ struct bset_tree *t,
+ unsigned offset)
+{
+ unsigned bset_offs = offset - btree_bkey_first_offset(t);
+ unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
+ unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
+
+ EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
+ EBUG_ON(!t->size);
+ EBUG_ON(idx > t->size);
+
+ while (idx < t->size &&
+ rw_aux_tree(b, t)[idx].offset < offset)
+ idx++;
+
+ while (idx &&
+ rw_aux_tree(b, t)[idx - 1].offset >= offset)
+ idx--;
+
+ EBUG_ON(idx < t->size &&
+ rw_aux_tree(b, t)[idx].offset < offset);
+ EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
+ EBUG_ON(idx + 1 < t->size &&
+ rw_aux_tree(b, t)[idx].offset ==
+ rw_aux_tree(b, t)[idx + 1].offset);
+
+ return idx;
+}
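+
+/*
+ * Example of the interpolated starting point above (made-up numbers): with
+ * bset_u64s = 100, t->size = 10 and a search offset 40 u64s past the first
+ * key, the initial guess is idx = 40 * 10 / 100 = 4; the two fixup loops then
+ * walk at most a few entries to land on the first entry with offset >= the
+ * search offset.
+ */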
+
+static inline unsigned bkey_mantissa(const struct bkey_packed *k,
+ const struct bkey_float *f,
+ unsigned idx)
+{
+ u64 v;
+
+ EBUG_ON(!bkey_packed(k));
+
+ v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
+
+ /*
+ * In little endian, we're shifting off low bits (and then the bits we
+ * want are at the low end), in big endian we're shifting off high bits
+ * (and then the bits we want are at the high end, so we shift them
+ * back down):
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ v >>= f->exponent & 7;
+#else
+ v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
+#endif
+ return (u16) v;
+}
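+
+/*
+ * Example (assumed values): on little endian with f->exponent = 13, we load a
+ * u64 starting at byte 13 >> 3 = 1 of the key and shift right by 13 & 7 = 5,
+ * leaving the mantissa bits at the bottom of v before it's truncated to 16
+ * bits.
+ */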
+
+static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
+ unsigned j,
+ struct bkey_packed *min_key,
+ struct bkey_packed *max_key)
+{
+ struct bkey_float *f = bkey_float(b, t, j);
+ struct bkey_packed *m = tree_to_bkey(b, t, j);
+ struct bkey_packed *l = is_power_of_2(j)
+ ? min_key
+ : tree_to_prev_bkey(b, t, j >> ffs(j));
+ struct bkey_packed *r = is_power_of_2(j + 1)
+ ? max_key
+ : tree_to_bkey(b, t, j >> (ffz(j) + 1));
+ unsigned mantissa;
+ int shift, exponent, high_bit;
+
+ /*
+ * for failed bfloats, the lookup code falls back to comparing against
+ * the original key.
+ */
+
+ if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
+ !b->nr_key_bits) {
+ f->exponent = BFLOAT_FAILED_UNPACKED;
+ return;
+ }
+
+ /*
+ * The greatest differing bit of l and r is the first bit we must
+ * include in the bfloat mantissa we're creating in order to do
+ * comparisons - that bit always becomes the high bit of
+ * bfloat->mantissa, and thus the exponent we're calculating here is
+ * the position of what will become the low bit in bfloat->mantissa:
+ *
+ * Note that this may be negative - we may be running off the low end
+ * of the key: we handle this later:
+ */
+ high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
+ min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
+ exponent = high_bit - (BKEY_MANTISSA_BITS - 1);
+
+ /*
+ * Then we calculate the actual shift value, from the start of the key
+ * (k->_data), to get the key bits starting at exponent:
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
+
+ EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
+#else
+ shift = high_bit_offset +
+ b->nr_key_bits -
+ exponent -
+ BKEY_MANTISSA_BITS;
+
+ EBUG_ON(shift < KEY_PACKED_BITS_START);
+#endif
+ EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
+
+ f->exponent = shift;
+ mantissa = bkey_mantissa(m, f, j);
+
+ /*
+ * If we've got garbage bits, set them to all 1s - it's legal for the
+ * bfloat to compare larger than the original key, but not smaller:
+ */
+ if (exponent < 0)
+ mantissa |= ~(~0U << -exponent);
+
+ f->mantissa = mantissa;
+}
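+
+/*
+ * Example (assumed values): if l and r first differ at bit 40 and
+ * BKEY_MANTISSA_BITS is 16, then high_bit = 40 and exponent = 40 - 15 = 25:
+ * the mantissa stores key bits 40..25, and lookups compare just those bits,
+ * falling back to the full key only when the mantissas compare equal and bits
+ * were dropped.
+ */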
+
+/* bytes remaining - only valid for last bset: */
+static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
+{
+ bset_aux_tree_verify(b);
+
+ return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
+}
+
+static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
+{
+ return __bset_tree_capacity(b, t) /
+ (sizeof(struct bkey_float) + sizeof(u8));
+}
+
+static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
+{
+ return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
+}
+
+static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
+{
+ struct bkey_packed *k;
+
+ t->size = 1;
+ t->extra = BSET_RW_AUX_TREE_VAL;
+ rw_aux_tree(b, t)[0].offset =
+ __btree_node_key_to_offset(b, btree_bkey_first(b, t));
+
+ bset_tree_for_each_key(b, t, k) {
+ if (t->size == bset_rw_tree_capacity(b, t))
+ break;
+
+ if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
+ L1_CACHE_BYTES)
+ rw_aux_tree_set(b, t, t->size++, k);
+ }
+}
+
+static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
+{
+ struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
+ struct bkey_i min_key, max_key;
+ unsigned j, cacheline = 1;
+
+ t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
+ bset_ro_tree_capacity(b, t));
+retry:
+ if (t->size < 2) {
+ t->size = 0;
+ t->extra = BSET_NO_AUX_TREE_VAL;
+ return;
+ }
+
+ t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+
+ /* First we figure out where the first key in each cacheline is */
+ eytzinger1_for_each(j, t->size - 1) {
+ while (bkey_to_cacheline(b, t, k) < cacheline)
+ prev = k, k = bkey_p_next(k);
+
+ if (k >= btree_bkey_last(b, t)) {
+ /* XXX: this path sucks */
+ t->size--;
+ goto retry;
+ }
+
+ ro_aux_tree_prev(b, t)[j] = prev->u64s;
+ bkey_float(b, t, j)->key_offset =
+ bkey_to_cacheline_offset(b, t, cacheline++, k);
+
+ EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
+ EBUG_ON(tree_to_bkey(b, t, j) != k);
+ }
+
+ while (k != btree_bkey_last(b, t))
+ prev = k, k = bkey_p_next(k);
+
+ if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
+ bkey_init(&min_key.k);
+ min_key.k.p = b->data->min_key;
+ }
+
+ if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
+ bkey_init(&max_key.k);
+ max_key.k.p = b->data->max_key;
+ }
+
+ /* Then we build the tree */
+ eytzinger1_for_each(j, t->size - 1)
+ make_bfloat(b, t, j,
+ bkey_to_packed(&min_key),
+ bkey_to_packed(&max_key));
+}
+
+static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
+{
+ struct bset_tree *i;
+
+ for (i = b->set; i != t; i++)
+ BUG_ON(bset_has_rw_aux_tree(i));
+
+ bch2_bset_set_no_aux_tree(b, t);
+
+ /* round up to next cacheline: */
+ t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
+ SMP_CACHE_BYTES / sizeof(u64));
+
+ bset_aux_tree_verify(b);
+}
+
+void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
+ bool writeable)
+{
+ if (writeable
+ ? bset_has_rw_aux_tree(t)
+ : bset_has_ro_aux_tree(t))
+ return;
+
+ bset_alloc_tree(b, t);
+
+ if (!__bset_tree_capacity(b, t))
+ return;
+
+ if (writeable)
+ __build_rw_aux_tree(b, t);
+ else
+ __build_ro_aux_tree(b, t);
+
+ bset_aux_tree_verify(b);
+}
+
+void bch2_bset_init_first(struct btree *b, struct bset *i)
+{
+ struct bset_tree *t;
+
+ BUG_ON(b->nsets);
+
+ memset(i, 0, sizeof(*i));
+ get_random_bytes(&i->seq, sizeof(i->seq));
+ SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
+
+ t = &b->set[b->nsets++];
+ set_btree_bset(b, t, i);
+}
+
+void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
+ struct btree_node_entry *bne)
+{
+ struct bset *i = &bne->keys;
+ struct bset_tree *t;
+
+ BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
+ BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
+ BUG_ON(b->nsets >= MAX_BSETS);
+
+ memset(i, 0, sizeof(*i));
+ i->seq = btree_bset_first(b)->seq;
+ SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
+
+ t = &b->set[b->nsets++];
+ set_btree_bset(b, t, i);
+}
+
+/*
+ * find _some_ key in the same bset as @k that precedes @k - not necessarily the
+ * immediate predecessor:
+ */
+static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
+ struct bkey_packed *k)
+{
+ struct bkey_packed *p;
+ unsigned offset;
+ int j;
+
+ EBUG_ON(k < btree_bkey_first(b, t) ||
+ k > btree_bkey_last(b, t));
+
+ if (k == btree_bkey_first(b, t))
+ return NULL;
+
+ switch (bset_aux_tree_type(t)) {
+ case BSET_NO_AUX_TREE:
+ p = btree_bkey_first(b, t);
+ break;
+ case BSET_RO_AUX_TREE:
+ j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
+
+ do {
+ p = j ? tree_to_bkey(b, t,
+ __inorder_to_eytzinger1(j--,
+ t->size - 1, t->extra))
+ : btree_bkey_first(b, t);
+ } while (p >= k);
+ break;
+ case BSET_RW_AUX_TREE:
+ offset = __btree_node_key_to_offset(b, k);
+ j = rw_aux_tree_bsearch(b, t, offset);
+ p = j ? rw_aux_to_bkey(b, t, j - 1)
+ : btree_bkey_first(b, t);
+ break;
+ }
+
+ return p;
+}
+
+struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
+ struct bset_tree *t,
+ struct bkey_packed *k,
+ unsigned min_key_type)
+{
+ struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
+
+ while ((p = __bkey_prev(b, t, k)) && !ret) {
+ for (i = p; i != k; i = bkey_p_next(i))
+ if (i->type >= min_key_type)
+ ret = i;
+
+ k = p;
+ }
+
+ if (bch2_expensive_debug_checks) {
+ BUG_ON(ret >= orig_k);
+
+ for (i = ret
+ ? bkey_p_next(ret)
+ : btree_bkey_first(b, t);
+ i != orig_k;
+ i = bkey_p_next(i))
+ BUG_ON(i->type >= min_key_type);
+ }
+
+ return ret;
+}
+
+/* Insert */
+
+static void bch2_bset_fix_lookup_table(struct btree *b,
+ struct bset_tree *t,
+ struct bkey_packed *_where,
+ unsigned clobber_u64s,
+ unsigned new_u64s)
+{
+ int shift = new_u64s - clobber_u64s;
+ unsigned l, j, where = __btree_node_key_to_offset(b, _where);
+
+ EBUG_ON(bset_has_ro_aux_tree(t));
+
+ if (!bset_has_rw_aux_tree(t))
+ return;
+
+ /* returns first entry >= where */
+ l = rw_aux_tree_bsearch(b, t, where);
+
+ if (!l) /* never delete first entry */
+ l++;
+ else if (l < t->size &&
+ where < t->end_offset &&
+ rw_aux_tree(b, t)[l].offset == where)
+ rw_aux_tree_set(b, t, l++, _where);
+
+ /* l now > where */
+
+ for (j = l;
+ j < t->size &&
+ rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
+ j++)
+ ;
+
+ if (j < t->size &&
+ rw_aux_tree(b, t)[j].offset + shift ==
+ rw_aux_tree(b, t)[l - 1].offset)
+ j++;
+
+ memmove(&rw_aux_tree(b, t)[l],
+ &rw_aux_tree(b, t)[j],
+ (void *) &rw_aux_tree(b, t)[t->size] -
+ (void *) &rw_aux_tree(b, t)[j]);
+ t->size -= j - l;
+
+ for (j = l; j < t->size; j++)
+ rw_aux_tree(b, t)[j].offset += shift;
+
+ EBUG_ON(l < t->size &&
+ rw_aux_tree(b, t)[l].offset ==
+ rw_aux_tree(b, t)[l - 1].offset);
+
+ if (t->size < bset_rw_tree_capacity(b, t) &&
+ (l < t->size
+ ? rw_aux_tree(b, t)[l].offset
+ : t->end_offset) -
+ rw_aux_tree(b, t)[l - 1].offset >
+ L1_CACHE_BYTES / sizeof(u64)) {
+ struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
+ struct bkey_packed *end = l < t->size
+ ? rw_aux_to_bkey(b, t, l)
+ : btree_bkey_last(b, t);
+ struct bkey_packed *k = start;
+
+ while (1) {
+ k = bkey_p_next(k);
+ if (k == end)
+ break;
+
+ if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
+ memmove(&rw_aux_tree(b, t)[l + 1],
+ &rw_aux_tree(b, t)[l],
+ (void *) &rw_aux_tree(b, t)[t->size] -
+ (void *) &rw_aux_tree(b, t)[l]);
+ t->size++;
+ rw_aux_tree_set(b, t, l, k);
+ break;
+ }
+ }
+ }
+
+ bch2_bset_verify_rw_aux_tree(b, t);
+ bset_aux_tree_verify(b);
+}
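+
+/*
+ * Example: an insert that grows the bset by shift = new_u64s - clobber_u64s =
+ * 2 first drops lookup table entries that pointed into the clobbered range,
+ * then slides every later entry's offset up by 2, and finally re-adds an entry
+ * if the gap between two neighbouring entries grew past a cacheline's worth of
+ * u64s.
+ */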
+
+void bch2_bset_insert(struct btree *b,
+ struct btree_node_iter *iter,
+ struct bkey_packed *where,
+ struct bkey_i *insert,
+ unsigned clobber_u64s)
+{
+ struct bkey_format *f = &b->format;
+ struct bset_tree *t = bset_tree_last(b);
+ struct bkey_packed packed, *src = bkey_to_packed(insert);
+
+ bch2_bset_verify_rw_aux_tree(b, t);
+ bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
+
+ if (bch2_bkey_pack_key(&packed, &insert->k, f))
+ src = &packed;
+
+ if (!bkey_deleted(&insert->k))
+ btree_keys_account_key_add(&b->nr, t - b->set, src);
+
+ if (src->u64s != clobber_u64s) {
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
+ u64 *dst_p = (u64 *) where->_data + src->u64s;
+
+ EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
+ (int) clobber_u64s - src->u64s);
+
+ memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
+ le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
+ set_btree_bset_end(b, t);
+ }
+
+ memcpy_u64s_small(where, src,
+ bkeyp_key_u64s(f, src));
+ memcpy_u64s(bkeyp_val(f, where), &insert->v,
+ bkeyp_val_u64s(f, src));
+
+ if (src->u64s != clobber_u64s)
+ bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
+
+ bch2_verify_btree_nr_keys(b);
+}
+
+void bch2_bset_delete(struct btree *b,
+ struct bkey_packed *where,
+ unsigned clobber_u64s)
+{
+ struct bset_tree *t = bset_tree_last(b);
+ u64 *src_p = (u64 *) where->_data + clobber_u64s;
+ u64 *dst_p = where->_data;
+
+ bch2_bset_verify_rw_aux_tree(b, t);
+
+ EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
+
+ memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
+ le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
+ set_btree_bset_end(b, t);
+
+ bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
+}
+
+/* Lookup */
+
+__flatten
+static struct bkey_packed *bset_search_write_set(const struct btree *b,
+ struct bset_tree *t,
+ struct bpos *search)
+{
+ unsigned l = 0, r = t->size;
+
+ while (l + 1 != r) {
+ unsigned m = (l + r) >> 1;
+
+ if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
+ l = m;
+ else
+ r = m;
+ }
+
+ return rw_aux_to_bkey(b, t, l);
+}
+
+static inline void prefetch_four_cachelines(void *p)
+{
+#ifdef CONFIG_X86_64
+ asm("prefetcht0 (-127 + 64 * 0)(%0);"
+ "prefetcht0 (-127 + 64 * 1)(%0);"
+ "prefetcht0 (-127 + 64 * 2)(%0);"
+ "prefetcht0 (-127 + 64 * 3)(%0);"
+ :
+ : "r" (p + 127));
+#else
+ prefetch(p + L1_CACHE_BYTES * 0);
+ prefetch(p + L1_CACHE_BYTES * 1);
+ prefetch(p + L1_CACHE_BYTES * 2);
+ prefetch(p + L1_CACHE_BYTES * 3);
+#endif
+}
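+
+/*
+ * The +127/-127 in the x86 version keeps each displacement within a signed
+ * byte: p + 127 is computed once, and -127 + 64 * {0,1,2,3} gives effective
+ * addresses p, p + 64, p + 128 and p + 192 using 8 bit displacements rather
+ * than 32 bit ones.
+ */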
+
+static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
+ const struct bkey_float *f,
+ unsigned idx)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
+
+ return f->exponent > key_bits_start;
+#else
+ unsigned key_bits_end = high_bit_offset + b->nr_key_bits;
+
+ return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
+#endif
+}
+
+__flatten
+static struct bkey_packed *bset_search_tree(const struct btree *b,
+ const struct bset_tree *t,
+ const struct bpos *search,
+ const struct bkey_packed *packed_search)
+{
+ struct ro_aux_tree *base = ro_aux_tree_base(b, t);
+ struct bkey_float *f;
+ struct bkey_packed *k;
+ unsigned inorder, n = 1, l, r;
+ int cmp;
+
+ do {
+ if (likely(n << 4 < t->size))
+ prefetch(&base->f[n << 4]);
+
+ f = &base->f[n];
+ if (unlikely(f->exponent >= BFLOAT_FAILED))
+ goto slowpath;
+
+ l = f->mantissa;
+ r = bkey_mantissa(packed_search, f, n);
+
+ if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
+ goto slowpath;
+
+ n = n * 2 + (l < r);
+ continue;
+slowpath:
+ k = tree_to_bkey(b, t, n);
+ cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
+ if (!cmp)
+ return k;
+
+ n = n * 2 + (cmp < 0);
+ } while (n < t->size);
+
+ inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);
+
+ /*
+ * n would have been the node we recursed to - the low bit tells us if
+ * we recursed left or recursed right.
+ */
+ if (likely(!(n & 1))) {
+ --inorder;
+ if (unlikely(!inorder))
+ return btree_bkey_first(b, t);
+
+ f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
+ }
+
+ return cacheline_to_bkey(b, t, inorder, f->key_offset);
+}
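+
+/*
+ * Example of the descent above: from node n = 5, n = n * 2 + (l < r) moves to
+ * the left child 10 when the node's mantissa is >= the search key's, and to
+ * the right child 11 when it's smaller - the same parent/child arithmetic as
+ * an implicit binary heap.
+ */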
+
+static __always_inline __flatten
+struct bkey_packed *__bch2_bset_search(struct btree *b,
+ struct bset_tree *t,
+ struct bpos *search,
+ const struct bkey_packed *lossy_packed_search)
+{
+ /*
+	 * First we search for a cacheline, then we do a linear search
+ * within that cacheline.
+ *
+ * To search for the cacheline, there's three different possibilities:
+ * * The set is too small to have a search tree, so we just do a linear
+ * search over the whole set.
+ * * The set is the one we're currently inserting into; keeping a full
+ * auxiliary search tree up to date would be too expensive, so we
+ * use a much simpler lookup table to do a binary search -
+ * bset_search_write_set().
+ * * Or we use the auxiliary search tree we constructed earlier -
+ * bset_search_tree()
+ */
+
+ switch (bset_aux_tree_type(t)) {
+ case BSET_NO_AUX_TREE:
+ return btree_bkey_first(b, t);
+ case BSET_RW_AUX_TREE:
+ return bset_search_write_set(b, t, search);
+ case BSET_RO_AUX_TREE:
+ return bset_search_tree(b, t, search, lossy_packed_search);
+ default:
+ BUG();
+ }
+}
+
+static __always_inline __flatten
+struct bkey_packed *bch2_bset_search_linear(struct btree *b,
+ struct bset_tree *t,
+ struct bpos *search,
+ struct bkey_packed *packed_search,
+ const struct bkey_packed *lossy_packed_search,
+ struct bkey_packed *m)
+{
+ if (lossy_packed_search)
+ while (m != btree_bkey_last(b, t) &&
+ bkey_iter_cmp_p_or_unp(b, m,
+ lossy_packed_search, search) < 0)
+ m = bkey_p_next(m);
+
+ if (!packed_search)
+ while (m != btree_bkey_last(b, t) &&
+ bkey_iter_pos_cmp(b, m, search) < 0)
+ m = bkey_p_next(m);
+
+ if (bch2_expensive_debug_checks) {
+ struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
+
+ BUG_ON(prev &&
+ bkey_iter_cmp_p_or_unp(b, prev,
+ packed_search, search) >= 0);
+ }
+
+ return m;
+}
+
+/* Btree node iterator */
+
+static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
+ struct btree *b,
+ const struct bkey_packed *k,
+ const struct bkey_packed *end)
+{
+ if (k != end) {
+ struct btree_node_iter_set *pos;
+
+ btree_node_iter_for_each(iter, pos)
+ ;
+
+ BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
+ *pos = (struct btree_node_iter_set) {
+ __btree_node_key_to_offset(b, k),
+ __btree_node_key_to_offset(b, end)
+ };
+ }
+}
+
+void bch2_btree_node_iter_push(struct btree_node_iter *iter,
+ struct btree *b,
+ const struct bkey_packed *k,
+ const struct bkey_packed *end)
+{
+ __bch2_btree_node_iter_push(iter, b, k, end);
+ bch2_btree_node_iter_sort(iter, b);
+}
+
+noinline __flatten __cold
+static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
+ struct btree *b, struct bpos *search)
+{
+ struct bkey_packed *k;
+
+ trace_bkey_pack_pos_fail(search);
+
+ bch2_btree_node_iter_init_from_start(iter, b);
+
+ while ((k = bch2_btree_node_iter_peek(iter, b)) &&
+ bkey_iter_pos_cmp(b, k, search) < 0)
+ bch2_btree_node_iter_advance(iter, b);
+}
+
+/**
+ * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
+ * given position
+ *
+ * @iter: iterator to initialize
+ * @b: btree node to search
+ * @search: search key
+ *
+ * Main entry point to the lookup code for individual btree nodes:
+ *
+ * NOTE:
+ *
+ * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
+ * keys. This doesn't matter for most code, but it does matter for lookups.
+ *
+ * For example, a run of adjacent keys containing a string of equal keys:
+ * i j k k k k l m
+ *
+ * If you search for k, the lookup code isn't guaranteed to return you any
+ * specific k. The lookup code is conceptually doing a binary search and
+ * iterating backwards is very expensive so if the pivot happens to land at the
+ * last k that's what you'll get.
+ *
+ * This works out ok, but it's something to be aware of:
+ *
+ * - For non extents, we guarantee that the live key comes last - see
+ * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
+ * see will only be deleted keys you don't care about.
+ *
+ * - For extents, deleted keys sort last (see the comment at the top of this
+ * file). But when you're searching for extents, you actually want the first
+ * key strictly greater than your search key - an extent that compares equal
+ * to the search key is going to have 0 sectors after the search key.
+ *
+ * But this does mean that we can't just search for
+ * bpos_successor(start_of_range) to get the first extent that overlaps with
+ * the range we want - if we're unlucky and there's an extent that ends
+ * exactly where we searched, then there could be a deleted key at the same
+ * position and we'd get that when we search instead of the preceding extent
+ * we needed.
+ *
+ * So we've got to search for start_of_range, then after the lookup iterate
+ * past any extents that compare equal to the position we searched for.
+ */
+__flatten
+void bch2_btree_node_iter_init(struct btree_node_iter *iter,
+ struct btree *b, struct bpos *search)
+{
+ struct bkey_packed p, *packed_search = NULL;
+ struct btree_node_iter_set *pos = iter->data;
+ struct bkey_packed *k[MAX_BSETS];
+ unsigned i;
+
+ EBUG_ON(bpos_lt(*search, b->data->min_key));
+ EBUG_ON(bpos_gt(*search, b->data->max_key));
+ bset_aux_tree_verify(b);
+
+ memset(iter, 0, sizeof(*iter));
+
+ switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
+ case BKEY_PACK_POS_EXACT:
+ packed_search = &p;
+ break;
+ case BKEY_PACK_POS_SMALLER:
+ packed_search = NULL;
+ break;
+ case BKEY_PACK_POS_FAIL:
+ btree_node_iter_init_pack_failed(iter, b, search);
+ return;
+ }
+
+ for (i = 0; i < b->nsets; i++) {
+ k[i] = __bch2_bset_search(b, b->set + i, search, &p);
+ prefetch_four_cachelines(k[i]);
+ }
+
+ for (i = 0; i < b->nsets; i++) {
+ struct bset_tree *t = b->set + i;
+ struct bkey_packed *end = btree_bkey_last(b, t);
+
+ k[i] = bch2_bset_search_linear(b, t, search,
+ packed_search, &p, k[i]);
+ if (k[i] != end)
+ *pos++ = (struct btree_node_iter_set) {
+ __btree_node_key_to_offset(b, k[i]),
+ __btree_node_key_to_offset(b, end)
+ };
+ }
+
+ bch2_btree_node_iter_sort(iter, b);
+}
+
+void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ struct bset_tree *t;
+
+ memset(iter, 0, sizeof(*iter));
+
+ for_each_bset(b, t)
+ __bch2_btree_node_iter_push(iter, b,
+ btree_bkey_first(b, t),
+ btree_bkey_last(b, t));
+ bch2_btree_node_iter_sort(iter, b);
+}
+
+struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
+ struct btree *b,
+ struct bset_tree *t)
+{
+ struct btree_node_iter_set *set;
+
+ btree_node_iter_for_each(iter, set)
+ if (set->end == t->end_offset)
+ return __btree_node_offset_to_key(b, set->k);
+
+ return btree_bkey_last(b, t);
+}
+
+static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
+ struct btree *b,
+ unsigned first)
+{
+ bool ret;
+
+ if ((ret = (btree_node_iter_cmp(b,
+ iter->data[first],
+ iter->data[first + 1]) > 0)))
+ swap(iter->data[first], iter->data[first + 1]);
+ return ret;
+}
+
+void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ /* unrolled bubble sort: */
+
+ if (!__btree_node_iter_set_end(iter, 2)) {
+ btree_node_iter_sort_two(iter, b, 0);
+ btree_node_iter_sort_two(iter, b, 1);
+ }
+
+ if (!__btree_node_iter_set_end(iter, 1))
+ btree_node_iter_sort_two(iter, b, 0);
+}
+
+void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
+ struct btree_node_iter_set *set)
+{
+ struct btree_node_iter_set *last =
+ iter->data + ARRAY_SIZE(iter->data) - 1;
+
+ memmove(&set[0], &set[1], (void *) last - (void *) set);
+ *last = (struct btree_node_iter_set) { 0, 0 };
+}
+
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
+
+ EBUG_ON(iter->data->k > iter->data->end);
+
+ if (unlikely(__btree_node_iter_set_end(iter, 0))) {
+ /* avoid an expensive memmove call: */
+ iter->data[0] = iter->data[1];
+ iter->data[1] = iter->data[2];
+ iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
+ return;
+ }
+
+ if (__btree_node_iter_set_end(iter, 1))
+ return;
+
+ if (!btree_node_iter_sort_two(iter, b, 0))
+ return;
+
+ if (__btree_node_iter_set_end(iter, 2))
+ return;
+
+ btree_node_iter_sort_two(iter, b, 1);
+}
+
+void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ if (bch2_expensive_debug_checks) {
+ bch2_btree_node_iter_verify(iter, b);
+ bch2_btree_node_iter_next_check(iter, b);
+ }
+
+ __bch2_btree_node_iter_advance(iter, b);
+}
+
+/*
+ * Expensive:
+ */
+struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ struct bkey_packed *k, *prev = NULL;
+ struct btree_node_iter_set *set;
+ struct bset_tree *t;
+ unsigned end = 0;
+
+ if (bch2_expensive_debug_checks)
+ bch2_btree_node_iter_verify(iter, b);
+
+ for_each_bset(b, t) {
+ k = bch2_bkey_prev_all(b, t,
+ bch2_btree_node_iter_bset_pos(iter, b, t));
+ if (k &&
+ (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
+ prev = k;
+ end = t->end_offset;
+ }
+ }
+
+ if (!prev)
+ return NULL;
+
+ /*
+ * We're manually memmoving instead of just calling sort() to ensure the
+ * prev we picked ends up in slot 0 - sort won't necessarily put it
+ * there because of duplicate deleted keys:
+ */
+ btree_node_iter_for_each(iter, set)
+ if (set->end == end)
+ goto found;
+
+ BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
+found:
+ BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
+
+ memmove(&iter->data[1],
+ &iter->data[0],
+ (void *) set - (void *) &iter->data[0]);
+
+ iter->data[0].k = __btree_node_key_to_offset(b, prev);
+ iter->data[0].end = end;
+
+ if (bch2_expensive_debug_checks)
+ bch2_btree_node_iter_verify(iter, b);
+ return prev;
+}
+
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ struct bkey_packed *prev;
+
+ do {
+ prev = bch2_btree_node_iter_prev_all(iter, b);
+ } while (prev && bkey_deleted(prev));
+
+ return prev;
+}
+
+struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
+ struct btree *b,
+ struct bkey *u)
+{
+ struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
+
+ return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
+}
+
+/* Mergesort */
+
+void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
+{
+ const struct bset_tree *t;
+
+ for_each_bset(b, t) {
+ enum bset_aux_tree_type type = bset_aux_tree_type(t);
+ size_t j;
+
+ stats->sets[type].nr++;
+ stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
+ sizeof(u64);
+
+ if (bset_has_ro_aux_tree(t)) {
+ stats->floats += t->size - 1;
+
+ for (j = 1; j < t->size; j++)
+ stats->failed +=
+ bkey_float(b, t, j)->exponent ==
+ BFLOAT_FAILED;
+ }
+ }
+}
+
+void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
+ struct bkey_packed *k)
+{
+ struct bset_tree *t = bch2_bkey_to_bset(b, k);
+ struct bkey uk;
+ unsigned j, inorder;
+
+ if (!bset_has_ro_aux_tree(t))
+ return;
+
+ inorder = bkey_to_cacheline(b, t, k);
+ if (!inorder || inorder >= t->size)
+ return;
+
+ j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
+ if (k != tree_to_bkey(b, t, j))
+ return;
+
+ switch (bkey_float(b, t, j)->exponent) {
+ case BFLOAT_FAILED:
+ uk = bkey_unpack_key(b, k);
+ prt_printf(out,
+ " failed unpacked at depth %u\n"
+ "\t",
+ ilog2(j));
+ bch2_bpos_to_text(out, uk.p);
+ prt_printf(out, "\n");
+ break;
+ }
+}
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
new file mode 100644
index 000000000000..632c2b8c5460
--- /dev/null
+++ b/fs/bcachefs/bset.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BSET_H
+#define _BCACHEFS_BSET_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "bcachefs.h"
+#include "bkey.h"
+#include "bkey_methods.h"
+#include "btree_types.h"
+#include "util.h" /* for time_stats */
+#include "vstructs.h"
+
+/*
+ * BKEYS:
+ *
+ * A bkey contains a key, a size field, a variable number of pointers, and some
+ * ancillary flag bits.
+ *
+ * We use two different functions for validating bkeys, bkey_invalid() and
+ * bkey_deleted().
+ *
+ * The one exception to the rule that bkey_invalid() filters out invalid keys is
+ * that it also filters out keys of size 0 - these are keys that have been
+ * completely overwritten. It'd be safe to delete these in memory while leaving
+ * them on disk, just unnecessary work - so we filter them out when resorting
+ * instead.
+ *
+ * We can't filter out stale keys when we're resorting, because garbage
+ * collection needs to find them to ensure bucket gens don't wrap around -
+ * unless we're rewriting the btree node those stale keys still exist on disk.
+ *
+ * We also implement functions here for removing some number of sectors from the
+ * front or the back of a bkey - this is mainly used for fixing overlapping
+ * extents, by removing the overlapping sectors from the older key.
+ *
+ * BSETS:
+ *
+ * A bset is an array of bkeys laid out contiguously in memory in sorted order,
+ * along with a header. A btree node is made up of a number of these, written at
+ * different times.
+ *
+ * There could be many of them on disk, but we never allow there to be more than
+ * 4 in memory - we lazily resort as needed.
+ *
+ * We implement code here for creating and maintaining auxiliary search trees
+ * (described below) for searching an individual bset, and on top of that we
+ * implement a btree iterator.
+ *
+ * BTREE ITERATOR:
+ *
+ * Most of the code in bcache doesn't care about an individual bset - it needs
+ * to search entire btree nodes and iterate over them in sorted order.
+ *
+ * The btree iterator code serves both functions; it iterates through the keys
+ * in a btree node in sorted order, starting from either keys after a specific
+ * point (if you pass it a search key) or the start of the btree node.
+ *
+ * AUXILIARY SEARCH TREES:
+ *
+ * Since keys are variable length, we can't use a binary search on a bset - we
+ * wouldn't be able to find the start of the next key. But binary searches are
+ * slow anyways, due to terrible cache behaviour; bcache originally used binary
+ * searches and that code topped out at under 50k lookups/second.
+ *
+ * So we need to construct some sort of lookup table. Since we only insert keys
+ * into the last (unwritten) set, most of the keys within a given btree node are
+ * usually in sets that are mostly constant. We use two different types of
+ * lookup tables to take advantage of this.
+ *
+ * Both lookup tables share in common that they don't index every key in the
+ * set; they index one key every BSET_CACHELINE bytes, and then a linear search
+ * is used for the rest.
+ *
+ * For sets that have been written to disk and are no longer being inserted
+ * into, we construct a binary search tree in an array - traversing a binary
+ * search tree in an array gives excellent locality of reference and is very
+ * fast, since both children of any node are adjacent to each other in memory
+ * (and their grandchildren, and great grandchildren...) - this means
+ * prefetching can be used to great effect.
+ *
+ * It's quite useful performance-wise to keep these nodes small - not just
+ * because they're more likely to be in L2, but also because we can prefetch
+ * more nodes on a single cacheline and thus prefetch more iterations in advance
+ * when traversing this tree.
+ *
+ * Nodes in the auxiliary search tree must contain both a key to compare against
+ * (we don't want to fetch the key from the set, that would defeat the purpose),
+ * and a pointer to the key. We use a few tricks to compress both of these.
+ *
+ * To compress the pointer, we take advantage of the fact that one node in the
+ * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
+ * a function (to_inorder()) that takes the index of a node in a binary tree and
+ * returns what its index would be in an inorder traversal, so we only have to
+ * store the low bits of the offset.
+ *
+ * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
+ * compress that, we take advantage of the fact that when we're traversing the
+ * search tree at every iteration we know that both our search key and the key
+ * we're looking for lie within some range - bounded by our previous
+ * comparisons. (We special case the start of a search so that this is true even
+ * at the root of the tree).
+ *
+ * So if we know the key we're looking for is between a and b, and a and b
+ * don't differ higher than bit 50, we don't need to check anything higher than
+ * bit 50.
+ *
+ * We don't usually need the rest of the bits, either; we only need enough bits
+ * to partition the key range we're currently checking. Consider key n - the
+ * key our auxiliary search tree node corresponds to, and key p, the key
+ * immediately preceding n. The lowest bit we need to store in the auxiliary
+ * search tree is the highest bit that differs between n and p.
+ *
+ * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
+ * comparison. But we'd really like our nodes in the auxiliary search tree to be
+ * of fixed size.
+ *
+ * The solution is to make them fixed size, and when we're constructing a node
+ * check if p and n differed in the bits we needed them to. If they don't we
+ * flag that node, and when doing lookups we fall back to comparing against the
+ * real key. As long as this doesn't happen too often (and it seems to reliably
+ * happen a bit less than 1% of the time), we win - even on failures, that key
+ * is then more likely to be in cache than if we were doing binary searches all
+ * the way, since we're touching so much less memory.
+ *
+ * The keys in the auxiliary search tree are stored in (software) floating
+ * point, with an exponent and a mantissa. The exponent needs to be big enough
+ * to address all the bits in the original key, but the number of bits in the
+ * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
+ *
+ * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
+ * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
+ * We need one node per 128 bytes in the btree node, which means the auxiliary
+ * search trees take up 3% as much memory as the btree itself.
+ *
+ * Constructing these auxiliary search trees is moderately expensive, and we
+ * don't want to be constantly rebuilding the search tree for the last set
+ * whenever we insert another key into it. For the unwritten set, we use a much
+ * simpler lookup table - it's just a flat array, so index i in the lookup table
+ * corresponds to the i'th range of BSET_CACHELINE bytes in the set. Indexing
+ * within each byte range works the same as with the auxiliary search trees.
+ *
+ * These are much easier to keep up to date when we insert a key - we do it
+ * somewhat lazily; when we shift a key up we usually just increment the pointer
+ * to it, only when it would overflow do we go to the trouble of finding the
+ * first key in that range of bytes again.
+ */
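+
+/*
+ * Illustrative arithmetic for the figures above: a 4 byte node per 128 bytes
+ * of keys is 4 / 128 ~= 3% overhead, and since both children of a node are
+ * adjacent in the array, a single 64 byte cacheline prefetch pulls in 16 such
+ * nodes - several levels of the tree at once near the root.
+ */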
+
+enum bset_aux_tree_type {
+ BSET_NO_AUX_TREE,
+ BSET_RO_AUX_TREE,
+ BSET_RW_AUX_TREE,
+};
+
+#define BSET_TREE_NR_TYPES 3
+
+#define BSET_NO_AUX_TREE_VAL (U16_MAX)
+#define BSET_RW_AUX_TREE_VAL (U16_MAX - 1)
+
+static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree *t)
+{
+ switch (t->extra) {
+ case BSET_NO_AUX_TREE_VAL:
+ EBUG_ON(t->size);
+ return BSET_NO_AUX_TREE;
+ case BSET_RW_AUX_TREE_VAL:
+ EBUG_ON(!t->size);
+ return BSET_RW_AUX_TREE;
+ default:
+ EBUG_ON(!t->size);
+ return BSET_RO_AUX_TREE;
+ }
+}
+
+/*
+ * BSET_CACHELINE was originally intended to match the hardware cacheline size -
+ * it used to be 64, but I realized the lookup code would touch slightly less
+ * memory if it was 128.
+ *
+ * It defines the number of bytes (in struct bset) per struct bkey_float in
+ * the auxiliary search tree - when we're done searching the bkey_float tree we
+ * have this many bytes left that we do a linear search over.
+ *
+ * Since (after level 5) every level of the bset_tree is on a new cacheline,
+ * we're touching one fewer cacheline in the bset tree in exchange for one more
+ * cacheline in the linear search - but the linear search might stop before it
+ * gets to the second cacheline.
+ */
+
+#define BSET_CACHELINE 256
+
+static inline size_t btree_keys_cachelines(const struct btree *b)
+{
+ return (1U << b->byte_order) / BSET_CACHELINE;
+}
+
+static inline size_t btree_aux_data_bytes(const struct btree *b)
+{
+ return btree_keys_cachelines(b) * 8;
+}
+
+static inline size_t btree_aux_data_u64s(const struct btree *b)
+{
+ return btree_aux_data_bytes(b) / sizeof(u64);
+}
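+
+/*
+ * Example: a 64 KiB btree node (byte_order = 16) has 65536 / 256 = 256
+ * cachelines of keys, so btree_aux_data_bytes() is 256 * 8 = 2048 and
+ * btree_aux_data_u64s() is 2048 / 8 = 256.
+ */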
+
+#define for_each_bset(_b, _t) \
+ for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
+
+#define bset_tree_for_each_key(_b, _t, _k) \
+ for (_k = btree_bkey_first(_b, _t); \
+ _k != btree_bkey_last(_b, _t); \
+ _k = bkey_p_next(_k))
+
+static inline bool bset_has_ro_aux_tree(const struct bset_tree *t)
+{
+ return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
+}
+
+static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
+{
+ return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
+}
+
+static inline void bch2_bset_set_no_aux_tree(struct btree *b,
+ struct bset_tree *t)
+{
+ BUG_ON(t < b->set);
+
+ for (; t < b->set + ARRAY_SIZE(b->set); t++) {
+ t->size = 0;
+ t->extra = BSET_NO_AUX_TREE_VAL;
+ t->aux_data_offset = U16_MAX;
+ }
+}
+
+static inline void btree_node_set_format(struct btree *b,
+ struct bkey_format f)
+{
+ int len;
+
+ b->format = f;
+ b->nr_key_bits = bkey_format_key_bits(&f);
+
+ len = bch2_compile_bkey_format(&b->format, b->aux_data);
+ BUG_ON(len < 0 || len > U8_MAX);
+
+ b->unpack_fn_len = len;
+
+ bch2_bset_set_no_aux_tree(b, b->set);
+}
+
+static inline struct bset *bset_next_set(struct btree *b,
+ unsigned block_bytes)
+{
+ struct bset *i = btree_bset_last(b);
+
+ EBUG_ON(!is_power_of_2(block_bytes));
+
+ return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
+}
+
+void bch2_btree_keys_init(struct btree *);
+
+void bch2_bset_init_first(struct btree *, struct bset *);
+void bch2_bset_init_next(struct bch_fs *, struct btree *,
+ struct btree_node_entry *);
+void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
+
+void bch2_bset_insert(struct btree *, struct btree_node_iter *,
+ struct bkey_packed *, struct bkey_i *, unsigned);
+void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);
+
+/* Bkey utility code */
+
+/* packed or unpacked */
+static inline int bkey_cmp_p_or_unp(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r_packed,
+ const struct bpos *r)
+{
+ EBUG_ON(r_packed && !bkey_packed(r_packed));
+
+ if (unlikely(!bkey_packed(l)))
+ return bpos_cmp(packed_to_bkey_c(l)->p, *r);
+
+ if (likely(r_packed))
+ return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);
+
+ return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
+}
+
+static inline struct bset_tree *
+bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
+{
+ unsigned offset = __btree_node_key_to_offset(b, k);
+ struct bset_tree *t;
+
+ for_each_bset(b, t)
+ if (offset <= t->end_offset) {
+ EBUG_ON(offset < btree_bkey_first_offset(t));
+ return t;
+ }
+
+ BUG();
+}
+
+struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);
+
+struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
+ struct bkey_packed *, unsigned);
+
+static inline struct bkey_packed *
+bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
+{
+ return bch2_bkey_prev_filter(b, t, k, 0);
+}
+
+static inline struct bkey_packed *
+bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
+{
+ return bch2_bkey_prev_filter(b, t, k, 1);
+}
+
+/* Btree key iteration */
+
+void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
+ const struct bkey_packed *,
+ const struct bkey_packed *);
+void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
+ struct bpos *);
+void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
+ struct btree *);
+struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
+ struct btree *,
+ struct bset_tree *);
+
+void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
+void bch2_btree_node_iter_set_drop(struct btree_node_iter *,
+ struct btree_node_iter_set *);
+void bch2_btree_node_iter_advance(struct btree_node_iter *, struct btree *);
+
+#define btree_node_iter_for_each(_iter, _set) \
+ for (_set = (_iter)->data; \
+ _set < (_iter)->data + ARRAY_SIZE((_iter)->data) && \
+ (_set)->k != (_set)->end; \
+ _set++)
+
+static inline bool __btree_node_iter_set_end(struct btree_node_iter *iter,
+ unsigned i)
+{
+ return iter->data[i].k == iter->data[i].end;
+}
+
+static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
+{
+ return __btree_node_iter_set_end(iter, 0);
+}
+
+/*
+ * When keys compare equal, deleted keys compare first:
+ *
+ * XXX: only need to compare pointers for keys that are both within a
+ * btree_node_iterator - we need to break ties for prev() to work correctly
+ */
+static inline int bkey_iter_cmp(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r)
+{
+ return bch2_bkey_cmp_packed(b, l, r)
+ ?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
+ ?: cmp_int(l, r);
+}
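+
+/*
+ * Example: for two keys that compare equal where l is deleted and r is live,
+ * (int) bkey_deleted(r) - (int) bkey_deleted(l) = 0 - 1 = -1, so the deleted
+ * key sorts first; only when both deleted flags match does the comparison
+ * fall through to the pointers themselves to break the tie.
+ */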
+
+static inline int btree_node_iter_cmp(const struct btree *b,
+ struct btree_node_iter_set l,
+ struct btree_node_iter_set r)
+{
+ return bkey_iter_cmp(b,
+ __btree_node_offset_to_key(b, l.k),
+ __btree_node_offset_to_key(b, r.k));
+}
+
+/* These assume r (the search key) is not a deleted key: */
+static inline int bkey_iter_pos_cmp(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bpos *r)
+{
+ return bkey_cmp_left_packed(b, l, r)
+ ?: -((int) bkey_deleted(l));
+}
+
+static inline int bkey_iter_cmp_p_or_unp(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r_packed,
+ const struct bpos *r)
+{
+ return bkey_cmp_p_or_unp(b, l, r_packed, r)
+ ?: -((int) bkey_deleted(l));
+}
+
+static inline struct bkey_packed *
+__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
+ struct btree *b)
+{
+ return __btree_node_offset_to_key(b, iter->data->k);
+}
+
+static inline struct bkey_packed *
+bch2_btree_node_iter_peek_all(struct btree_node_iter *iter, struct btree *b)
+{
+ return !bch2_btree_node_iter_end(iter)
+ ? __btree_node_offset_to_key(b, iter->data->k)
+ : NULL;
+}
+
+static inline struct bkey_packed *
+bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
+{
+ struct bkey_packed *k;
+
+ while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
+ bkey_deleted(k))
+ bch2_btree_node_iter_advance(iter, b);
+
+ return k;
+}
+
+static inline struct bkey_packed *
+bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
+{
+ struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b);
+
+ if (ret)
+ bch2_btree_node_iter_advance(iter, b);
+
+ return ret;
+}
+
+struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *,
+ struct btree *);
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *,
+ struct btree *);
+
+struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
+ struct btree *,
+ struct bkey *);
+
+#define for_each_btree_node_key(b, k, iter) \
+ for (bch2_btree_node_iter_init_from_start((iter), (b)); \
+ (k = bch2_btree_node_iter_peek((iter), (b))); \
+ bch2_btree_node_iter_advance(iter, b))
+
+#define for_each_btree_node_key_unpack(b, k, iter, unpacked) \
+ for (bch2_btree_node_iter_init_from_start((iter), (b)); \
+ (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
+ bch2_btree_node_iter_advance(iter, b))
+
+/* Accounting: */
+
+static inline void btree_keys_account_key(struct btree_nr_keys *n,
+ unsigned bset,
+ struct bkey_packed *k,
+ int sign)
+{
+ n->live_u64s += k->u64s * sign;
+ n->bset_u64s[bset] += k->u64s * sign;
+
+ if (bkey_packed(k))
+ n->packed_keys += sign;
+ else
+ n->unpacked_keys += sign;
+}
+
+static inline void btree_keys_account_val_delta(struct btree *b,
+ struct bkey_packed *k,
+ int delta)
+{
+ struct bset_tree *t = bch2_bkey_to_bset(b, k);
+
+ b->nr.live_u64s += delta;
+ b->nr.bset_u64s[t - b->set] += delta;
+}
+
+#define btree_keys_account_key_add(_nr, _bset_idx, _k) \
+ btree_keys_account_key(_nr, _bset_idx, _k, 1)
+#define btree_keys_account_key_drop(_nr, _bset_idx, _k) \
+ btree_keys_account_key(_nr, _bset_idx, _k, -1)
+
+#define btree_account_key_add(_b, _k) \
+ btree_keys_account_key(&(_b)->nr, \
+ bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
+#define btree_account_key_drop(_b, _k) \
+ btree_keys_account_key(&(_b)->nr, \
+ bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)
+
+struct bset_stats {
+ struct {
+ size_t nr, bytes;
+ } sets[BSET_TREE_NR_TYPES];
+
+ size_t floats;
+ size_t failed;
+};
+
+void bch2_btree_keys_stats(const struct btree *, struct bset_stats *);
+void bch2_bfloat_to_text(struct printbuf *, struct btree *,
+ struct bkey_packed *);
+
+/* Debug stuff */
+
+void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned);
+void bch2_dump_btree_node(struct bch_fs *, struct btree *);
+void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void __bch2_verify_btree_nr_keys(struct btree *);
+void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
+void bch2_verify_insert_pos(struct btree *, struct bkey_packed *,
+ struct bkey_packed *, unsigned);
+
+#else
+
+static inline void __bch2_verify_btree_nr_keys(struct btree *b) {}
+static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
+ struct btree *b) {}
+static inline void bch2_verify_insert_pos(struct btree *b,
+ struct bkey_packed *where,
+ struct bkey_packed *insert,
+ unsigned clobber_u64s) {}
+#endif
+
+static inline void bch2_verify_btree_nr_keys(struct btree *b)
+{
+ if (bch2_debug_check_btree_accounting)
+ __bch2_verify_btree_nr_keys(b);
+}
+
+#endif /* _BCACHEFS_BSET_H */
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
new file mode 100644
index 000000000000..82cf243aa288
--- /dev/null
+++ b/fs/bcachefs/btree_cache.c
@@ -0,0 +1,1202 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "debug.h"
+#include "errcode.h"
+#include "error.h"
+#include "trace.h"
+
+#include <linux/prefetch.h>
+#include <linux/sched/mm.h>
+
+const char * const bch2_btree_node_flags[] = {
+#define x(f) #f,
+ BTREE_FLAGS()
+#undef x
+ NULL
+};
+
+void bch2_recalc_btree_reserve(struct bch_fs *c)
+{
+ unsigned i, reserve = 16;
+
+ if (!c->btree_roots_known[0].b)
+ reserve += 8;
+
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (r->b)
+ reserve += min_t(unsigned, 1, r->b->c.level) * 8;
+ }
+
+ c->btree_cache.reserve = reserve;
+}
+
+static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+{
+ return max_t(int, 0, bc->used - bc->reserve);
+}
+
+static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
+{
+ if (b->c.lock.readers)
+ list_move(&b->list, &bc->freed_pcpu);
+ else
+ list_move(&b->list, &bc->freed_nonpcpu);
+}
+
+static void btree_node_data_free(struct bch_fs *c, struct btree *b)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
+ EBUG_ON(btree_node_write_in_flight(b));
+
+ clear_btree_node_just_written(b);
+
+ kvpfree(b->data, btree_bytes(c));
+ b->data = NULL;
+#ifdef __KERNEL__
+ kvfree(b->aux_data);
+#else
+ munmap(b->aux_data, btree_aux_data_bytes(b));
+#endif
+ b->aux_data = NULL;
+
+ bc->used--;
+
+ btree_node_to_freedlist(bc, b);
+}
+
+static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct btree *b = obj;
+ const u64 *v = arg->key;
+
+ return b->hash_val == *v ? 0 : 1;
+}
+
+static const struct rhashtable_params bch_btree_cache_params = {
+ .head_offset = offsetof(struct btree, hash),
+ .key_offset = offsetof(struct btree, hash_val),
+ .key_len = sizeof(u64),
+ .obj_cmpfn = bch2_btree_cache_cmp_fn,
+};
+
+static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+{
+ BUG_ON(b->data || b->aux_data);
+
+ b->data = kvpmalloc(btree_bytes(c), gfp);
+ if (!b->data)
+ return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
+#ifdef __KERNEL__
+ b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
+#else
+ b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (b->aux_data == MAP_FAILED)
+ b->aux_data = NULL;
+#endif
+ if (!b->aux_data) {
+ kvpfree(b->data, btree_bytes(c));
+ b->data = NULL;
+ return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
+ }
+
+ return 0;
+}
+
+static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
+{
+ struct btree *b;
+
+ b = kzalloc(sizeof(struct btree), gfp);
+ if (!b)
+ return NULL;
+
+ bkey_btree_ptr_init(&b->key);
+ INIT_LIST_HEAD(&b->list);
+ INIT_LIST_HEAD(&b->write_blocked);
+ b->byte_order = ilog2(btree_bytes(c));
+ return b;
+}
+
+struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+
+ b = __btree_node_mem_alloc(c, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ if (btree_node_data_alloc(c, b, GFP_KERNEL)) {
+ kfree(b);
+ return NULL;
+ }
+
+ bch2_btree_lock_init(&b->c, 0);
+
+ bc->used++;
+ list_add(&b->list, &bc->freeable);
+ return b;
+}
+
+/* Btree in memory cache - hash table */
+
+void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
+{
+ int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
+ BUG_ON(ret);
+
+ /* Cause future lookups for this node to fail: */
+ b->hash_val = 0;
+}
+
+int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
+{
+ BUG_ON(b->hash_val);
+ b->hash_val = btree_ptr_hash_val(&b->key);
+
+ return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
+ bch_btree_cache_params);
+}
+
+int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
+ unsigned level, enum btree_id id)
+{
+ int ret;
+
+ b->c.level = level;
+ b->c.btree_id = id;
+
+ mutex_lock(&bc->lock);
+ ret = __bch2_btree_node_hash_insert(bc, b);
+ if (!ret)
+ list_add_tail(&b->list, &bc->live);
+ mutex_unlock(&bc->lock);
+
+ return ret;
+}
+
+__flatten
+static inline struct btree *btree_cache_find(struct btree_cache *bc,
+ const struct bkey_i *k)
+{
+ u64 v = btree_ptr_hash_val(k);
+
+ return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
+}
+
+/*
+ * this version is for btree nodes that have already been freed (we're not
+ * reaping a real btree node)
+ */
+static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ int ret = 0;
+
+ lockdep_assert_held(&bc->lock);
+wait_on_io:
+ if (b->flags & ((1U << BTREE_NODE_dirty)|
+ (1U << BTREE_NODE_read_in_flight)|
+ (1U << BTREE_NODE_write_in_flight))) {
+ if (!flush)
+ return -BCH_ERR_ENOMEM_btree_node_reclaim;
+
+ /* XXX: waiting on IO with btree cache lock held */
+ bch2_btree_node_wait_on_read(b);
+ bch2_btree_node_wait_on_write(b);
+ }
+
+ if (!six_trylock_intent(&b->c.lock))
+ return -BCH_ERR_ENOMEM_btree_node_reclaim;
+
+ if (!six_trylock_write(&b->c.lock))
+ goto out_unlock_intent;
+
+ /* recheck under lock */
+ if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
+ (1U << BTREE_NODE_write_in_flight))) {
+ if (!flush)
+ goto out_unlock;
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
+
+ if (btree_node_noevict(b) ||
+ btree_node_write_blocked(b) ||
+ btree_node_will_make_reachable(b))
+ goto out_unlock;
+
+ if (btree_node_dirty(b)) {
+ if (!flush)
+ goto out_unlock;
+ /*
+ * Using the underscore version because we don't want to compact
+ * bsets after the write, since this node is about to be evicted
+	 * - unless btree verify mode is enabled, since it runs from
+ * the post write cleanup:
+ */
+ if (bch2_verify_btree_ondisk)
+ bch2_btree_node_write(c, b, SIX_LOCK_intent,
+ BTREE_WRITE_cache_reclaim);
+ else
+ __bch2_btree_node_write(c, b,
+ BTREE_WRITE_cache_reclaim);
+
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
+out:
+ if (b->hash_val && !ret)
+ trace_and_count(c, btree_cache_reap, c, b);
+ return ret;
+out_unlock:
+ six_unlock_write(&b->c.lock);
+out_unlock_intent:
+ six_unlock_intent(&b->c.lock);
+ ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
+ goto out;
+}
+
+static int btree_node_reclaim(struct bch_fs *c, struct btree *b)
+{
+ return __btree_node_reclaim(c, b, false);
+}
+
+static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
+{
+ return __btree_node_reclaim(c, b, true);
+}
+
+static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct bch_fs *c = container_of(shrink, struct bch_fs,
+ btree_cache.shrink);
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b, *t;
+ unsigned long nr = sc->nr_to_scan;
+ unsigned long can_free = 0;
+ unsigned long freed = 0;
+ unsigned long touched = 0;
+ unsigned i, flags;
+ unsigned long ret = SHRINK_STOP;
+ bool trigger_writes = atomic_read(&bc->dirty) + nr >=
+ bc->used * 3 / 4;
+
+ if (bch2_btree_shrinker_disabled)
+ return SHRINK_STOP;
+
+ mutex_lock(&bc->lock);
+ flags = memalloc_nofs_save();
+
+ /*
+ * It's _really_ critical that we don't free too many btree nodes - we
+ * have to always leave ourselves a reserve. The reserve is how we
+ * guarantee that allocating memory for a new btree node can always
+ * succeed, so that inserting keys into the btree can always succeed and
+ * IO can always make forward progress:
+ */
+ can_free = btree_cache_can_free(bc);
+ nr = min_t(unsigned long, nr, can_free);
+
+ i = 0;
+ list_for_each_entry_safe(b, t, &bc->freeable, list) {
+ /*
+ * Leave a few nodes on the freeable list, so that a btree split
+ * won't have to hit the system allocator:
+ */
+ if (++i <= 3)
+ continue;
+
+ touched++;
+
+ if (touched >= nr)
+ goto out;
+
+ if (!btree_node_reclaim(c, b)) {
+ btree_node_data_free(c, b);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ freed++;
+ }
+ }
+restart:
+ list_for_each_entry_safe(b, t, &bc->live, list) {
+ touched++;
+
+ if (btree_node_accessed(b)) {
+ clear_btree_node_accessed(b);
+ } else if (!btree_node_reclaim(c, b)) {
+ freed++;
+ btree_node_data_free(c, b);
+
+ bch2_btree_node_hash_remove(bc, b);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+
+ if (freed == nr)
+ goto out_rotate;
+ } else if (trigger_writes &&
+ btree_node_dirty(b) &&
+ !btree_node_will_make_reachable(b) &&
+ !btree_node_write_blocked(b) &&
+ six_trylock_read(&b->c.lock)) {
+ list_move(&bc->live, &b->list);
+ mutex_unlock(&bc->lock);
+ __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
+ six_unlock_read(&b->c.lock);
+ if (touched >= nr)
+ goto out_nounlock;
+ mutex_lock(&bc->lock);
+ goto restart;
+ }
+
+ if (touched >= nr)
+ break;
+ }
+out_rotate:
+ if (&t->list != &bc->live)
+ list_move_tail(&bc->live, &t->list);
+out:
+ mutex_unlock(&bc->lock);
+out_nounlock:
+ ret = freed;
+ memalloc_nofs_restore(flags);
+ trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
+ return ret;
+}
+
+static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct bch_fs *c = container_of(shrink, struct bch_fs,
+ btree_cache.shrink);
+ struct btree_cache *bc = &c->btree_cache;
+
+ if (bch2_btree_shrinker_disabled)
+ return 0;
+
+ return btree_cache_can_free(bc);
+}
+
+void bch2_fs_btree_cache_exit(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+ unsigned i, flags;
+
+ unregister_shrinker(&bc->shrink);
+
+ /* vfree() can allocate memory: */
+ flags = memalloc_nofs_save();
+ mutex_lock(&bc->lock);
+
+ if (c->verify_data)
+ list_move(&c->verify_data->list, &bc->live);
+
+ kvpfree(c->verify_ondisk, btree_bytes(c));
+
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (r->b)
+ list_add(&r->b->list, &bc->live);
+ }
+
+ list_splice(&bc->freeable, &bc->live);
+
+ while (!list_empty(&bc->live)) {
+ b = list_first_entry(&bc->live, struct btree, list);
+
+ BUG_ON(btree_node_read_in_flight(b) ||
+ btree_node_write_in_flight(b));
+
+ if (btree_node_dirty(b))
+ bch2_btree_complete_write(c, b, btree_current_write(b));
+ clear_btree_node_dirty_acct(c, b);
+
+ btree_node_data_free(c, b);
+ }
+
+ BUG_ON(atomic_read(&c->btree_cache.dirty));
+
+ list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
+
+ while (!list_empty(&bc->freed_nonpcpu)) {
+ b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
+ list_del(&b->list);
+ six_lock_exit(&b->c.lock);
+ kfree(b);
+ }
+
+ mutex_unlock(&bc->lock);
+ memalloc_nofs_restore(flags);
+
+ if (bc->table_init_done)
+ rhashtable_destroy(&bc->table);
+}
+
+int bch2_fs_btree_cache_init(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ unsigned i;
+ int ret = 0;
+
+ ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
+ if (ret)
+ goto err;
+
+ bc->table_init_done = true;
+
+ bch2_recalc_btree_reserve(c);
+
+ for (i = 0; i < bc->reserve; i++)
+ if (!__bch2_btree_node_mem_alloc(c))
+ goto err;
+
+ list_splice_init(&bc->live, &bc->freeable);
+
+ mutex_init(&c->verify_lock);
+
+ bc->shrink.count_objects = bch2_btree_cache_count;
+ bc->shrink.scan_objects = bch2_btree_cache_scan;
+ bc->shrink.seeks = 4;
+ ret = register_shrinker(&bc->shrink, "%s/btree_cache", c->name);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+}
+
+void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
+{
+ mutex_init(&bc->lock);
+ INIT_LIST_HEAD(&bc->live);
+ INIT_LIST_HEAD(&bc->freeable);
+ INIT_LIST_HEAD(&bc->freed_pcpu);
+ INIT_LIST_HEAD(&bc->freed_nonpcpu);
+}
+
+/*
+ * We can only have one thread cannibalizing other cached btree nodes at a time,
+ * or we'll deadlock. We use an open coded mutex to ensure that, taken by
+ * bch2_btree_cache_cannibalize_lock(). This means every time we unlock the
+ * root of the btree, we need to release this lock if we have it held.
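+ *
+ * The lock itself is just a task_struct pointer in the btree cache: it's
+ * taken with cmpxchg(), released by clearing it, and waiters park on a
+ * closure waitlist.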
+ */
+void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
+ if (bc->alloc_lock == current) {
+ trace_and_count(c, btree_cache_cannibalize_unlock, c);
+ bc->alloc_lock = NULL;
+ closure_wake_up(&bc->alloc_wait);
+ }
+}
+
+int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct task_struct *old;
+
+ old = cmpxchg(&bc->alloc_lock, NULL, current);
+ if (old == NULL || old == current)
+ goto success;
+
+ if (!cl) {
+ trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
+ return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
+ }
+
+ closure_wait(&bc->alloc_wait, cl);
+
+ /* Try again, after adding ourselves to waitlist */
+ old = cmpxchg(&bc->alloc_lock, NULL, current);
+ if (old == NULL || old == current) {
+ /* We raced */
+ closure_wake_up(&bc->alloc_wait);
+ goto success;
+ }
+
+ trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
+ return -BCH_ERR_btree_cache_cannibalize_lock_blocked;
+
+success:
+ trace_and_count(c, btree_cache_cannibalize_lock, c);
+ return 0;
+}
+
+static struct btree *btree_node_cannibalize(struct bch_fs *c)
+{
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+
+ list_for_each_entry_reverse(b, &bc->live, list)
+ if (!btree_node_reclaim(c, b))
+ return b;
+
+ while (1) {
+ list_for_each_entry_reverse(b, &bc->live, list)
+ if (!btree_node_write_and_reclaim(c, b))
+ return b;
+
+ /*
+ * Rare case: all nodes were intent-locked.
+ * Just busy-wait.
+ */
+ WARN_ONCE(1, "btree cache cannibalize failed\n");
+ cond_resched();
+ }
+}
+
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_cache *bc = &c->btree_cache;
+ struct list_head *freed = pcpu_read_locks
+ ? &bc->freed_pcpu
+ : &bc->freed_nonpcpu;
+ struct btree *b, *b2;
+ u64 start_time = local_clock();
+ unsigned flags;
+
+ flags = memalloc_nofs_save();
+ mutex_lock(&bc->lock);
+
+ /*
+ * We never free struct btree itself, just the memory that holds the
+ * on-disk node. Check the freed list before allocating a new one:
+ */
+ list_for_each_entry(b, freed, list)
+ if (!btree_node_reclaim(c, b)) {
+ list_del_init(&b->list);
+ goto got_node;
+ }
+
+ b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
+ if (!b) {
+ mutex_unlock(&bc->lock);
+ bch2_trans_unlock(trans);
+ b = __btree_node_mem_alloc(c, GFP_KERNEL);
+ if (!b)
+ goto err;
+ mutex_lock(&bc->lock);
+ }
+
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
+
+ BUG_ON(!six_trylock_intent(&b->c.lock));
+ BUG_ON(!six_trylock_write(&b->c.lock));
+got_node:
+
+ /*
+ * btree_free() doesn't free memory; it sticks the node on the end of
+ * the list. Check if there are any freed nodes there:
+ */
+ list_for_each_entry(b2, &bc->freeable, list)
+ if (!btree_node_reclaim(c, b2)) {
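+ /*
+ * Steal b2's buffers for b, which we already have locked;
+ * b2 itself goes on the freed list:
+ */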
+ swap(b->data, b2->data);
+ swap(b->aux_data, b2->aux_data);
+ btree_node_to_freedlist(bc, b2);
+ six_unlock_write(&b2->c.lock);
+ six_unlock_intent(&b2->c.lock);
+ goto got_mem;
+ }
+
+ mutex_unlock(&bc->lock);
+
+ if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+ bch2_trans_unlock(trans);
+ if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+ goto err;
+ }
+
+ mutex_lock(&bc->lock);
+ bc->used++;
+got_mem:
+ mutex_unlock(&bc->lock);
+
+ BUG_ON(btree_node_hashed(b));
+ BUG_ON(btree_node_dirty(b));
+ BUG_ON(btree_node_write_in_flight(b));
+out:
+ b->flags = 0;
+ b->written = 0;
+ b->nsets = 0;
+ b->sib_u64s[0] = 0;
+ b->sib_u64s[1] = 0;
+ b->whiteout_u64s = 0;
+ bch2_btree_keys_init(b);
+ set_btree_node_accessed(b);
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
+ start_time);
+
+ memalloc_nofs_restore(flags);
+ return b;
+err:
+ mutex_lock(&bc->lock);
+
+ /* Try to cannibalize another cached btree node: */
+ if (bc->alloc_lock == current) {
+ b2 = btree_node_cannibalize(c);
+ clear_btree_node_just_written(b2);
+ bch2_btree_node_hash_remove(bc, b2);
+
+ if (b) {
+ swap(b->data, b2->data);
+ swap(b->aux_data, b2->aux_data);
+ btree_node_to_freedlist(bc, b2);
+ six_unlock_write(&b2->c.lock);
+ six_unlock_intent(&b2->c.lock);
+ } else {
+ b = b2;
+ list_del_init(&b->list);
+ }
+
+ mutex_unlock(&bc->lock);
+
+ trace_and_count(c, btree_cache_cannibalize, c);
+ goto out;
+ }
+
+ mutex_unlock(&bc->lock);
+ memalloc_nofs_restore(flags);
+ return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
+}
+
+/* Slowpath, don't want it inlined into btree_iter_traverse() */
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
+ struct btree_path *path,
+ const struct bkey_i *k,
+ enum btree_id btree_id,
+ unsigned level,
+ enum six_lock_type lock_type,
+ bool sync)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+ u32 seq;
+
+ BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
+ /*
+ * Parent node must be locked, else we could read in a btree node that's
+ * been freed:
+ */
+ if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
+ trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
+ }
+
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
+
+ if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
+ trans->memory_allocation_failure = true;
+ trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
+ }
+
+ if (IS_ERR(b))
+ return b;
+
+ /*
+ * Btree nodes read in from disk should not have the accessed bit set
+ * initially, so that linear scans don't thrash the cache:
+ */
+ clear_btree_node_accessed(b);
+
+ bkey_copy(&b->key, k);
+ if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
+ /* raced with another fill: */
+
+ /* mark as unhashed... */
+ b->hash_val = 0;
+
+ mutex_lock(&bc->lock);
+ list_add(&b->list, &bc->freeable);
+ mutex_unlock(&bc->lock);
+
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ return NULL;
+ }
+
+ set_btree_node_read_in_flight(b);
+
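+ /*
+ * Remember the lock sequence number so that after IO completes we can
+ * relock cheaply, provided no one took a write lock in the meantime:
+ */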
+ six_unlock_write(&b->c.lock);
+ seq = six_lock_seq(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+
+ /* Unlock before doing IO: */
+ if (path && sync)
+ bch2_trans_unlock_noassert(trans);
+
+ bch2_btree_node_read(c, b, sync);
+
+ if (!sync)
+ return NULL;
+
+ if (path) {
+ int ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ BUG_ON(!trans->restarted);
+ return ERR_PTR(ret);
+ }
+ }
+
+ if (!six_relock_type(&b->c.lock, lock_type, seq)) {
+ if (path)
+ trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
+ }
+
+ return b;
+}
+
+static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
+{
+ struct printbuf buf = PRINTBUF;
+
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
+ return;
+
+ prt_printf(&buf,
+ "btree node header doesn't match ptr\n"
+ "btree %s level %u\n"
+ "ptr: ",
+ bch2_btree_ids[b->c.btree_id], b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ prt_printf(&buf, "\nheader: btree %s level %llu\n"
+ "min ",
+ bch2_btree_ids[BTREE_NODE_ID(b->data)],
+ BTREE_NODE_LEVEL(b->data));
+ bch2_bpos_to_text(&buf, b->data->min_key);
+
+ prt_printf(&buf, "\nmax ");
+ bch2_bpos_to_text(&buf, b->data->max_key);
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+}
+
+static inline void btree_check_header(struct bch_fs *c, struct btree *b)
+{
+ if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
+ b->c.level != BTREE_NODE_LEVEL(b->data) ||
+ !bpos_eq(b->data->max_key, b->key.k.p) ||
+ (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ !bpos_eq(b->data->min_key,
+ bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
+ btree_bad_header(c, b);
+}
+
+static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+ const struct bkey_i *k, unsigned level,
+ enum six_lock_type lock_type,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+ struct bset_tree *t;
+ bool need_relock = false;
+ int ret;
+
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+retry:
+ b = btree_cache_find(bc, k);
+ if (unlikely(!b)) {
+ /*
+ * We must have the parent locked to call bch2_btree_node_fill(),
+ * else we could read in a btree node from disk that's been
+ * freed:
+ */
+ b = bch2_btree_node_fill(trans, path, k, path->btree_id,
+ level, lock_type, true);
+ need_relock = true;
+
+ /* We raced and found the btree node in the cache */
+ if (!b)
+ goto retry;
+
+ if (IS_ERR(b))
+ return b;
+ } else {
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(trans, path, level + 1);
+
+ ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+
+ BUG_ON(ret);
+
+ if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+ b->c.level != level ||
+ race_fault())) {
+ six_unlock_type(&b->c.lock, lock_type);
+ if (bch2_btree_node_relock(trans, path, level + 1))
+ goto retry;
+
+ trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
+ }
+
+ /* avoid atomic set bit if it's not needed: */
+ if (!btree_node_accessed(b))
+ set_btree_node_accessed(b);
+ }
+
+ if (unlikely(btree_node_read_in_flight(b))) {
+ u32 seq = six_lock_seq(&b->c.lock);
+
+ six_unlock_type(&b->c.lock, lock_type);
+ bch2_trans_unlock(trans);
+ need_relock = true;
+
+ bch2_btree_node_wait_on_read(b);
+
+ /*
+ * should_be_locked is not set on this path yet, so we need to
+ * relock it specifically:
+ */
+ if (!six_relock_type(&b->c.lock, lock_type, seq))
+ goto retry;
+ }
+
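+ /*
+ * If we dropped transaction locks (to fill the node, or to wait on a
+ * read in flight), retake them before returning:
+ */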
+ if (unlikely(need_relock)) {
+ ret = bch2_trans_relock(trans) ?:
+ bch2_btree_path_relock_intent(trans, path);
+ if (ret) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(ret);
+ }
+ }
+
+ prefetch(b->aux_data);
+
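+ /*
+ * Prefetch the first cachelines of each bset's lookup tables, to hide
+ * memory latency ahead of the first key lookup:
+ */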
+ for_each_bset(b, t) {
+ void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+ prefetch(p + L1_CACHE_BYTES * 0);
+ prefetch(p + L1_CACHE_BYTES * 1);
+ prefetch(p + L1_CACHE_BYTES * 2);
+ }
+
+ if (unlikely(btree_node_read_error(b))) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(-EIO);
+ }
+
+ EBUG_ON(b->c.btree_id != path->btree_id);
+ EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+ btree_check_header(c, b);
+
+ return b;
+}
+
+/**
+ * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * @trans: btree transaction object
+ * @path: btree_path being traversed
+ * @k: pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
+ * @level: level of btree node being looked up (0 == leaf node)
+ * @lock_type: SIX_LOCK_read or SIX_LOCK_intent
+ * @trace_ip: ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
+ *
+ * The btree node will have either a read or an intent lock held, depending on
+ * @lock_type.
+ *
+ * Returns: btree node or ERR_PTR()
+ */
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+ const struct bkey_i *k, unsigned level,
+ enum six_lock_type lock_type,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b;
+ struct bset_tree *t;
+ int ret;
+
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+ b = btree_node_mem_ptr(k);
+
+ /*
+ * Check b->hash_val _before_ calling btree_node_lock() - this might not
+ * be the node we want anymore, and trying to lock the wrong node could
+ * cause an unnecessary transaction restart:
+ */
+ if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
+ !b ||
+ b->hash_val != btree_ptr_hash_val(k)))
+ return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(trans, path, level + 1);
+
+ ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+
+ BUG_ON(ret);
+
+ if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+ b->c.level != level ||
+ race_fault())) {
+ six_unlock_type(&b->c.lock, lock_type);
+ if (bch2_btree_node_relock(trans, path, level + 1))
+ return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+ trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
+ }
+
+ if (unlikely(btree_node_read_in_flight(b))) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+ }
+
+ prefetch(b->aux_data);
+
+ for_each_bset(b, t) {
+ void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+ prefetch(p + L1_CACHE_BYTES * 0);
+ prefetch(p + L1_CACHE_BYTES * 1);
+ prefetch(p + L1_CACHE_BYTES * 2);
+ }
+
+ /* avoid atomic set bit if it's not needed: */
+ if (!btree_node_accessed(b))
+ set_btree_node_accessed(b);
+
+ if (unlikely(btree_node_read_error(b))) {
+ six_unlock_type(&b->c.lock, lock_type);
+ return ERR_PTR(-EIO);
+ }
+
+ EBUG_ON(b->c.btree_id != path->btree_id);
+ EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+ btree_check_header(c, b);
+
+ return b;
+}
+
+struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
+ const struct bkey_i *k,
+ enum btree_id btree_id,
+ unsigned level,
+ bool nofill)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+ struct bset_tree *t;
+ int ret;
+
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+ if (c->opts.btree_node_mem_ptr_optimization) {
+ b = btree_node_mem_ptr(k);
+ if (b)
+ goto lock_node;
+ }
+retry:
+ b = btree_cache_find(bc, k);
+ if (unlikely(!b)) {
+ if (nofill)
+ goto out;
+
+ b = bch2_btree_node_fill(trans, NULL, k, btree_id,
+ level, SIX_LOCK_read, true);
+
+ /* We raced and found the btree node in the cache */
+ if (!b)
+ goto retry;
+
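+ /*
+ * Allocation failed: take the cannibalize lock, so we can evict
+ * another cached node, and retry:
+ */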
+ if (IS_ERR(b) &&
+ !bch2_btree_cache_cannibalize_lock(c, NULL))
+ goto retry;
+
+ if (IS_ERR(b))
+ goto out;
+ } else {
+lock_node:
+ ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ERR_PTR(ret);
+
+ BUG_ON(ret);
+
+ if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+ b->c.btree_id != btree_id ||
+ b->c.level != level)) {
+ six_unlock_read(&b->c.lock);
+ goto retry;
+ }
+ }
+
+ /* XXX: waiting on IO with btree locks held: */
+ __bch2_btree_node_wait_on_read(b);
+
+ prefetch(b->aux_data);
+
+ for_each_bset(b, t) {
+ void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+ prefetch(p + L1_CACHE_BYTES * 0);
+ prefetch(p + L1_CACHE_BYTES * 1);
+ prefetch(p + L1_CACHE_BYTES * 2);
+ }
+
+ /* avoid atomic set bit if it's not needed: */
+ if (!btree_node_accessed(b))
+ set_btree_node_accessed(b);
+
+ if (unlikely(btree_node_read_error(b))) {
+ six_unlock_read(&b->c.lock);
+ b = ERR_PTR(-EIO);
+ goto out;
+ }
+
+ EBUG_ON(b->c.btree_id != btree_id);
+ EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+ btree_check_header(c, b);
+out:
+ bch2_btree_cache_cannibalize_unlock(c);
+ return b;
+}
+
+int bch2_btree_node_prefetch(struct btree_trans *trans,
+ struct btree_path *path,
+ const struct bkey_i *k,
+ enum btree_id btree_id, unsigned level)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+
+ BUG_ON(trans && !btree_node_locked(path, level + 1));
+ BUG_ON(level >= BTREE_MAX_DEPTH);
+
+ b = btree_cache_find(bc, k);
+ if (b)
+ return 0;
+
+ b = bch2_btree_node_fill(trans, path, k, btree_id,
+ level, SIX_LOCK_read, false);
+ return PTR_ERR_OR_ZERO(b);
+}
+
+void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_cache *bc = &c->btree_cache;
+ struct btree *b;
+
+ b = btree_cache_find(bc, k);
+ if (!b)
+ return;
+wait_on_io:
+ /* not allowed to wait on IO with btree locks held: */
+
+ /*
+ * XXX: we're called from btree_gc, which will be holding other btree
+ * nodes locked
+ */
+ __bch2_btree_node_wait_on_read(b);
+ __bch2_btree_node_wait_on_write(b);
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+
+ if (btree_node_dirty(b)) {
+ __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ goto wait_on_io;
+ }
+
+ BUG_ON(btree_node_dirty(b));
+
+ mutex_lock(&bc->lock);
+ btree_node_data_free(c, b);
+ bch2_btree_node_hash_remove(bc, b);
+ mutex_unlock(&bc->lock);
+
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+}
+
+void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
+ const struct btree *b)
+{
+ struct bset_stats stats;
+
+ memset(&stats, 0, sizeof(stats));
+
+ bch2_btree_keys_stats(b, &stats);
+
+ prt_printf(out, "l %u ", b->c.level);
+ bch2_bpos_to_text(out, b->data->min_key);
+ prt_printf(out, " - ");
+ bch2_bpos_to_text(out, b->data->max_key);
+ prt_printf(out, ":\n"
+ " ptrs: ");
+ bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+ prt_newline(out);
+
+ prt_printf(out,
+ " format: ");
+ bch2_bkey_format_to_text(out, &b->format);
+
+ prt_printf(out,
+ " unpack fn len: %u\n"
+ " bytes used %zu/%zu (%zu%% full)\n"
+ " sib u64s: %u, %u (merge threshold %u)\n"
+ " nr packed keys %u\n"
+ " nr unpacked keys %u\n"
+ " floats %zu\n"
+ " failed unpacked %zu\n",
+ b->unpack_fn_len,
+ b->nr.live_u64s * sizeof(u64),
+ btree_bytes(c) - sizeof(struct btree_node),
+ b->nr.live_u64s * 100 / btree_max_u64s(c),
+ b->sib_u64s[0],
+ b->sib_u64s[1],
+ c->btree_foreground_merge_threshold,
+ b->nr.packed_keys,
+ b->nr.unpacked_keys,
+ stats.floats,
+ stats.failed);
+}
+
+void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
+{
+ prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
+ prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
+ prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+}
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
new file mode 100644
index 000000000000..1e562b6efa62
--- /dev/null
+++ b/fs/bcachefs/btree_cache.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_CACHE_H
+#define _BCACHEFS_BTREE_CACHE_H
+
+#include "bcachefs.h"
+#include "btree_types.h"
+#include "bkey_methods.h"
+
+extern const char * const bch2_btree_node_flags[];
+
+struct btree_iter;
+
+void bch2_recalc_btree_reserve(struct bch_fs *);
+
+void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);
+int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
+int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
+ unsigned, enum btree_id);
+
+void bch2_btree_cache_cannibalize_unlock(struct bch_fs *);
+int bch2_btree_cache_cannibalize_lock(struct bch_fs *, struct closure *);
+
+struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *);
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *, bool);
+
+struct btree *bch2_btree_node_get(struct btree_trans *, struct btree_path *,
+ const struct bkey_i *, unsigned,
+ enum six_lock_type, unsigned long);
+
+struct btree *bch2_btree_node_get_noiter(struct btree_trans *, const struct bkey_i *,
+ enum btree_id, unsigned, bool);
+
+int bch2_btree_node_prefetch(struct btree_trans *, struct btree_path *,
+ const struct bkey_i *, enum btree_id, unsigned);
+
+void bch2_btree_node_evict(struct btree_trans *, const struct bkey_i *);
+
+void bch2_fs_btree_cache_exit(struct bch_fs *);
+int bch2_fs_btree_cache_init(struct bch_fs *);
+void bch2_fs_btree_cache_init_early(struct btree_cache *);
+
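+ /*
+ * Nodes are keyed in the hash table by the first u64 of the pointer
+ * value: the first data pointer for v1 btree pointers, the seq field
+ * for v2:
+ */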
+static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
+{
+ switch (k->k.type) {
+ case KEY_TYPE_btree_ptr:
+ return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
+ case KEY_TYPE_btree_ptr_v2:
+ /*
+ * The cast/deref is only necessary to avoid sparse endianness
+ * warnings:
+ */
+ return *((u64 *) &bkey_i_to_btree_ptr_v2_c(k)->v.seq);
+ default:
+ return 0;
+ }
+}
+
+static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
+{
+ return k->k.type == KEY_TYPE_btree_ptr_v2
+ ? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
+ : NULL;
+}
+
+/* is btree node in hash table? */
+static inline bool btree_node_hashed(struct btree *b)
+{
+ return b->hash_val != 0;
+}
+
+#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos) \
+ for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl, \
+ &(_c)->btree_cache.table), \
+ _iter = 0; _iter < (_tbl)->size; _iter++) \
+ rht_for_each_entry_rcu((_b), (_pos), _tbl, _iter, hash)
+
+static inline size_t btree_bytes(struct bch_fs *c)
+{
+ return c->opts.btree_node_size;
+}
+
+static inline size_t btree_max_u64s(struct bch_fs *c)
+{
+ return (btree_bytes(c) - sizeof(struct btree_node)) / sizeof(u64);
+}
+
+static inline size_t btree_pages(struct bch_fs *c)
+{
+ return btree_bytes(c) / PAGE_SIZE;
+}
+
+static inline unsigned btree_blocks(struct bch_fs *c)
+{
+ return btree_sectors(c) >> c->block_bits;
+}
+
+#define BTREE_SPLIT_THRESHOLD(c) (btree_max_u64s(c) * 2 / 3)
+
+#define BTREE_FOREGROUND_MERGE_THRESHOLD(c) (btree_max_u64s(c) * 1 / 3)
+#define BTREE_FOREGROUND_MERGE_HYSTERESIS(c) \
+ (BTREE_FOREGROUND_MERGE_THRESHOLD(c) + \
+ (BTREE_FOREGROUND_MERGE_THRESHOLD(c) >> 2))
+
+static inline unsigned btree_id_nr_alive(struct bch_fs *c)
+{
+ return BTREE_ID_NR + c->btree_roots_extra.nr;
+}
+
+static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned id)
+{
+ if (likely(id < BTREE_ID_NR)) {
+ return &c->btree_roots_known[id];
+ } else {
+ unsigned idx = id - BTREE_ID_NR;
+
+ EBUG_ON(idx >= c->btree_roots_extra.nr);
+ return &c->btree_roots_extra.data[idx];
+ }
+}
+
+static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
+{
+ return bch2_btree_id_root(c, b->c.btree_id)->b;
+}
+
+void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
+ const struct btree *);
+void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);
+
+#endif /* _BCACHEFS_BTREE_CACHE_H */
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
new file mode 100644
index 000000000000..693ed067b1a7
--- /dev/null
+++ b/fs/bcachefs/btree_gc.c
@@ -0,0 +1,2111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright (C) 2014 Datera Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "bkey_methods.h"
+#include "bkey_buf.h"
+#include "btree_journal_iter.h"
+#include "btree_key_cache.h"
+#include "btree_locking.h"
+#include "btree_update_interior.h"
+#include "btree_io.h"
+#include "btree_gc.h"
+#include "buckets.h"
+#include "clock.h"
+#include "debug.h"
+#include "ec.h"
+#include "error.h"
+#include "extents.h"
+#include "journal.h"
+#include "keylist.h"
+#include "move.h"
+#include "recovery.h"
+#include "reflink.h"
+#include "replicas.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/task.h>
+
+#define DROP_THIS_NODE 10
+#define DROP_PREV_NODE 11
+
+static bool should_restart_for_topology_repair(struct bch_fs *c)
+{
+ return c->opts.fix_errors != FSCK_FIX_no &&
+ !(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
+}
+
+static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
+{
+ preempt_disable();
+ write_seqcount_begin(&c->gc_pos_lock);
+ c->gc_pos = new_pos;
+ write_seqcount_end(&c->gc_pos_lock);
+ preempt_enable();
+}
+
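+ /*
+ * gc_pos may only move forwards within a pass; __gc_pos_set() publishes it
+ * under a seqcount so that readers can take a consistent snapshot:
+ */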
+static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
+{
+ BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
+ __gc_pos_set(c, new_pos);
+}
+
+/*
+ * Missing: if an interior btree node is empty, we need to do something -
+ * perhaps just kill it
+ */
+static int bch2_gc_check_topology(struct bch_fs *c,
+ struct btree *b,
+ struct bkey_buf *prev,
+ struct bkey_buf cur,
+ bool is_last)
+{
+ struct bpos node_start = b->data->min_key;
+ struct bpos node_end = b->data->max_key;
+ struct bpos expected_start = bkey_deleted(&prev->k->k)
+ ? node_start
+ : bpos_successor(prev->k->k.p);
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ int ret = 0;
+
+ if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
+
+ if (!bpos_eq(expected_start, bp->v.min_key)) {
+ bch2_topology_error(c);
+
+ if (bkey_deleted(&prev->k->k)) {
+ prt_printf(&buf1, "start of node: ");
+ bch2_bpos_to_text(&buf1, node_start);
+ } else {
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
+ }
+ bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
+
+ if (__fsck_err(c,
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " cur %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf) &&
+ should_restart_for_topology_repair(c)) {
+ bch_info(c, "Halting mark and sweep to start topology repair pass");
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+ goto err;
+ } else {
+ set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ }
+ }
+ }
+
+ if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf1);
+ printbuf_reset(&buf2);
+
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
+ bch2_bpos_to_text(&buf2, node_end);
+
+ if (__fsck_err(c,
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " %s\n"
+ " expected %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf) &&
+ should_restart_for_topology_repair(c)) {
+ bch_info(c, "Halting mark and sweep to start topology repair pass");
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+ goto err;
+ } else {
+ set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ }
+ }
+
+ bch2_bkey_buf_copy(prev, c, cur.k);
+err:
+fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
+{
+ switch (b->key.k.type) {
+ case KEY_TYPE_btree_ptr: {
+ struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);
+
+ dst->k.p = src->k.p;
+ dst->v.mem_ptr = 0;
+ dst->v.seq = b->data->keys.seq;
+ dst->v.sectors_written = 0;
+ dst->v.flags = 0;
+ dst->v.min_key = b->data->min_key;
+ set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
+ memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
+ break;
+ }
+ case KEY_TYPE_btree_ptr_v2:
+ bkey_copy(&dst->k_i, &b->key);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void bch2_btree_node_update_key_early(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b;
+ struct bkey_buf tmp;
+ int ret;
+
+ bch2_bkey_buf_init(&tmp);
+ bch2_bkey_buf_reassemble(&tmp, c, old);
+
+ b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
+ if (!IS_ERR_OR_NULL(b)) {
+ mutex_lock(&c->btree_cache.lock);
+
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, new);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+
+ mutex_unlock(&c->btree_cache.lock);
+ six_unlock_read(&b->c.lock);
+ }
+
+ bch2_bkey_buf_exit(&tmp, c);
+}
+
+static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
+{
+ struct bkey_i_btree_ptr_v2 *new;
+ int ret;
+
+ new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
+ if (!new)
+ return -BCH_ERR_ENOMEM_gc_repair_key;
+
+ btree_ptr_to_v2(b, new);
+ b->data->min_key = new_min;
+ new->v.min_key = new_min;
+ SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
+
+ ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+ if (ret) {
+ kfree(new);
+ return ret;
+ }
+
+ bch2_btree_node_drop_keys_outside_node(b);
+ bkey_copy(&b->key, &new->k_i);
+ return 0;
+}
+
+static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
+{
+ struct bkey_i_btree_ptr_v2 *new;
+ int ret;
+
+ ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
+ if (ret)
+ return ret;
+
+ new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
+ if (!new)
+ return -BCH_ERR_ENOMEM_gc_repair_key;
+
+ btree_ptr_to_v2(b, new);
+ b->data->max_key = new_max;
+ new->k.p = new_max;
+ SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
+
+ ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+ if (ret) {
+ kfree(new);
+ return ret;
+ }
+
+ bch2_btree_node_drop_keys_outside_node(b);
+
+ mutex_lock(&c->btree_cache.lock);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, &new->k_i);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+ mutex_unlock(&c->btree_cache.lock);
+ return 0;
+}
+
+static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
+ struct btree *prev, struct btree *cur)
+{
+ struct bpos expected_start = !prev
+ ? b->data->min_key
+ : bpos_successor(prev->key.k.p);
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ int ret = 0;
+
+ if (!prev) {
+ prt_printf(&buf1, "start of node: ");
+ bch2_bpos_to_text(&buf1, b->data->min_key);
+ } else {
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
+ }
+
+ bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
+
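+ /*
+ * When two sibling nodes overlap, the node with the higher sequence
+ * number was written later and wins; the other node is trimmed, or
+ * dropped entirely if it was fully overwritten:
+ */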
+ if (prev &&
+ bpos_gt(expected_start, cur->data->min_key) &&
+ BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
+ /* cur overwrites prev: */
+
+ if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
+ cur->data->min_key), c,
+ "btree node overwritten by next node at btree %s level %u:\n"
+ " node %s\n"
+ " next %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf)) {
+ ret = DROP_PREV_NODE;
+ goto out;
+ }
+
+ if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
+ bpos_predecessor(cur->data->min_key)), c,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " node %s\n"
+ " next %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf))
+ ret = set_node_max(c, prev,
+ bpos_predecessor(cur->data->min_key));
+ } else {
+ /* prev overwrites cur: */
+
+ if (mustfix_fsck_err_on(bpos_ge(expected_start,
+ cur->data->max_key), c,
+ "btree node overwritten by prev node at btree %s level %u:\n"
+ " prev %s\n"
+ " node %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf)) {
+ ret = DROP_THIS_NODE;
+ goto out;
+ }
+
+ if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
+ "btree node with incorrect min_key at btree %s level %u:\n"
+ " prev %s\n"
+ " node %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf))
+ ret = set_node_min(c, cur, expected_start);
+ }
+out:
+fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
+ struct btree *child)
+{
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ int ret = 0;
+
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
+ bch2_bpos_to_text(&buf2, b->key.k.p);
+
+ if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
+ "btree node with incorrect max_key at btree %s level %u:\n"
+ " %s\n"
+ " expected %s",
+ bch2_btree_ids[b->c.btree_id], b->c.level,
+ buf1.buf, buf2.buf)) {
+ ret = set_node_max(c, child, b->key.k.p);
+ if (ret)
+ goto err;
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf prev_k, cur_k;
+ struct btree *prev = NULL, *cur = NULL;
+ bool have_child, dropped_children = false;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (!b->c.level)
+ return 0;
+again:
+ prev = NULL;
+ have_child = dropped_children = false;
+ bch2_bkey_buf_init(&prev_k);
+ bch2_bkey_buf_init(&cur_k);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
+
+ bch2_btree_and_journal_iter_advance(&iter);
+ bch2_bkey_buf_reassemble(&cur_k, c, k);
+
+ cur = bch2_btree_node_get_noiter(trans, cur_k.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(cur);
+
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
+
+ if (mustfix_fsck_err_on(ret == -EIO, c,
+ "Topology repair: unreadable btree node at btree %s level %u:\n"
+ " %s",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level - 1,
+ buf.buf)) {
+ bch2_btree_node_evict(trans, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ cur = NULL;
+ if (ret)
+ break;
+ continue;
+ }
+
+ if (ret) {
+ bch_err_msg(c, ret, "getting btree node");
+ break;
+ }
+
+ ret = btree_repair_node_boundaries(c, b, prev, cur);
+
+ if (ret == DROP_THIS_NODE) {
+ six_unlock_read(&cur->c.lock);
+ bch2_btree_node_evict(trans, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ cur = NULL;
+ if (ret)
+ break;
+ continue;
+ }
+
+ if (prev)
+ six_unlock_read(&prev->c.lock);
+ prev = NULL;
+
+ if (ret == DROP_PREV_NODE) {
+ bch2_btree_node_evict(trans, prev_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, prev_k.k->k.p);
+ if (ret)
+ break;
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&prev_k, c);
+ bch2_bkey_buf_exit(&cur_k, c);
+ goto again;
+ } else if (ret)
+ break;
+
+ prev = cur;
+ cur = NULL;
+ bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
+ }
+
+ if (!ret && !IS_ERR_OR_NULL(prev)) {
+ BUG_ON(cur);
+ ret = btree_repair_node_end(c, b, prev);
+ }
+
+ if (!IS_ERR_OR_NULL(prev))
+ six_unlock_read(&prev->c.lock);
+ prev = NULL;
+ if (!IS_ERR_OR_NULL(cur))
+ six_unlock_read(&cur->c.lock);
+ cur = NULL;
+
+ if (ret)
+ goto err;
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ bch2_bkey_buf_reassemble(&cur_k, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ cur = bch2_btree_node_get_noiter(trans, cur_k.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(cur);
+
+ if (ret) {
+ bch_err_msg(c, ret, "getting btree node");
+ goto err;
+ }
+
+ ret = bch2_btree_repair_topology_recurse(trans, cur);
+ six_unlock_read(&cur->c.lock);
+ cur = NULL;
+
+ if (ret == DROP_THIS_NODE) {
+ bch2_btree_node_evict(trans, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ dropped_children = true;
+ }
+
+ if (ret)
+ goto err;
+
+ have_child = true;
+ }
+
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ if (mustfix_fsck_err_on(!have_child, c,
+ "empty interior btree node at btree %s level %u\n"
+ " %s",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level, buf.buf))
+ ret = DROP_THIS_NODE;
+err:
+fsck_err:
+ if (!IS_ERR_OR_NULL(prev))
+ six_unlock_read(&prev->c.lock);
+ if (!IS_ERR_OR_NULL(cur))
+ six_unlock_read(&cur->c.lock);
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&prev_k, c);
+ bch2_bkey_buf_exit(&cur_k, c);
+
+ if (!ret && dropped_children)
+ goto again;
+
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_check_topology(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree *b;
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (!r->alive)
+ continue;
+
+ b = r->b;
+ if (btree_node_fake(b))
+ continue;
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ ret = bch2_btree_repair_topology_recurse(trans, b);
+ six_unlock_read(&b->c.lock);
+
+ if (ret == DROP_THIS_NODE) {
+ bch_err(c, "empty btree root - repair unimplemented");
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ }
+ }
+
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id,
+ unsigned level, bool is_root,
+ struct bkey_s_c *k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
+ const union bch_extent_entry *entry_c;
+ struct extent_ptr_decoded p = { 0 };
+ bool do_update = false;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ /*
+ * XXX
+ * use check_bucket_ref here
+ */
+ bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
+
+ if (!g->gen_valid &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
+ if (!p.ptr.cached) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ } else {
+ do_update = true;
+ }
+ }
+
+ if (gen_cmp(p.ptr.gen, g->gen) > 0 &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))) {
+ if (!p.ptr.cached) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ } else {
+ do_update = true;
+ }
+ }
+
+ if (gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
+ do_update = true;
+
+ if (!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0 &&
+ (c->opts.reconstruct_alloc ||
+ fsck_err(c, "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[ptr_data_type(k->k, &p.ptr)],
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))))
+ do_update = true;
+
+ if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
+ continue;
+
+ if (fsck_err_on(bucket_data_type(g->data_type) &&
+ bucket_data_type(g->data_type) != data_type, c,
+ "bucket %u:%zu different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_types[g->data_type],
+ bch2_data_types[data_type],
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
+ if (data_type == BCH_DATA_btree) {
+ g->data_type = data_type;
+ set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ } else {
+ do_update = true;
+ }
+ }
+
+ if (p.has_ec) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
+
+ if (fsck_err_on(!m || !m->alive, c,
+ "pointer to nonexistent stripe %llu\n"
+ "while marking %s",
+ (u64) p.ec.idx,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
+ do_update = true;
+
+ if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
+ "pointer does not match stripe %llu\n"
+ "while marking %s",
+ (u64) p.ec.idx,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
+ do_update = true;
+ }
+ }
+
+ if (do_update) {
+ struct bkey_ptrs ptrs;
+ union bch_extent_entry *entry;
+ struct bch_extent_ptr *ptr;
+ struct bkey_i *new;
+
+ if (is_root) {
+ bch_err(c, "cannot update btree roots yet");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
+ if (!new) {
+ ret = -BCH_ERR_ENOMEM_gc_repair_key;
+ bch_err_msg(c, ret, "allocating new key");
+ goto err;
+ }
+
+ bkey_reassemble(new, *k);
+
+ if (level) {
+ /*
+ * We don't want to drop btree node pointers - if the
+ * btree node isn't there anymore, the read path will
+ * sort it out:
+ */
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_GC_BUCKET(ca, ptr);
+
+ ptr->gen = g->gen;
+ }
+ } else {
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_GC_BUCKET(ca, ptr);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
+
+ (ptr->cached &&
+ (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
+ (!ptr->cached &&
+ gen_cmp(ptr->gen, g->gen) < 0) ||
+ gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
+ (g->data_type &&
+ g->data_type != data_type);
+ }));
+again:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
+ entry->stripe_ptr.idx);
+ union bch_extent_entry *next_ptr;
+
+ bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
+ if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
+ goto found;
+ next_ptr = NULL;
+found:
+ if (!next_ptr) {
+ bch_err(c, "aieee, found stripe ptr with no data ptr");
+ continue;
+ }
+
+ if (!m || !m->alive ||
+ !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
+ &next_ptr->ptr,
+ m->sectors)) {
+ bch2_bkey_extent_entry_drop(new, entry);
+ goto again;
+ }
+ }
+ }
+ }
+
+ ret = bch2_journal_key_insert_take(c, btree_id, level, new);
+ if (ret) {
+ kfree(new);
+ goto err;
+ }
+
+ if (level)
+ bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);
+
+ if (0) {
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, *k);
+ bch_info(c, "updated %s", buf.buf);
+
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
+ bch_info(c, "new key %s", buf.buf);
+ }
+
+ *k = bkey_i_to_s_c(new);
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+/* marking of btree keys/nodes: */
+
+static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
+ unsigned level, bool is_root,
+ struct bkey_s_c *k,
+ bool initial)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey deleted = KEY(0, 0, 0);
+ struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
+ unsigned flags =
+ BTREE_TRIGGER_GC|
+ (initial ? BTREE_TRIGGER_NOATOMIC : 0);
+ int ret = 0;
+
+ deleted.p = k->k->p;
+
+ if (initial) {
+ BUG_ON(bch2_journal_seq_verify &&
+ k->k->version.lo > atomic64_read(&c->journal.seq));
+
+ ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
+ if (ret)
+ goto err;
+
+ if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
+ "key version number higher than recorded: %llu > %llu",
+ k->k->version.lo,
+ atomic64_read(&c->key_version)))
+ atomic64_set(&c->key_version, k->k->version.lo);
+ }
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_mark_key(trans, btree_id, level, old, *k, flags));
+fsck_err:
+err:
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_node_iter iter;
+ struct bkey unpacked;
+ struct bkey_s_c k;
+ struct bkey_buf prev, cur;
+ int ret = 0;
+
+ if (!btree_node_type_needs_gc(btree_node_type(b)))
+ return 0;
+
+ bch2_btree_node_iter_init_from_start(&iter, b);
+ bch2_bkey_buf_init(&prev);
+ bch2_bkey_buf_init(&cur);
+ bkey_init(&prev.k->k);
+
+ while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
+ &k, initial);
+ if (ret)
+ break;
+
+ bch2_btree_node_iter_advance(&iter, b);
+
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&cur, c, k);
+
+ ret = bch2_gc_check_topology(c, b, &prev, cur,
+ bch2_btree_node_iter_end(&iter));
+ if (ret)
+ break;
+ }
+ }
+
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
+ return ret;
+}
+
+static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
+ bool initial, bool metadata_only)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct btree *b;
+ unsigned depth = metadata_only ? 1 : 0;
+ int ret = 0;
+
+ gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
+
+ __for_each_btree_node(trans, iter, btree_id, POS_MIN,
+ 0, depth, BTREE_ITER_PREFETCH, b, ret) {
+ bch2_verify_btree_nr_keys(b);
+
+ gc_pos_set(c, gc_pos_btree_node(b));
+
+ ret = btree_gc_mark_node(trans, b, initial);
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&c->btree_root_lock);
+ b = bch2_btree_id_root(c, btree_id)->b;
+ if (!btree_node_fake(b)) {
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
+ true, &k, initial);
+ }
+ gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
+ mutex_unlock(&c->btree_root_lock);
+
+ return ret;
+}
+
+static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
+ unsigned target_depth)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf cur, prev;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+ bch2_bkey_buf_init(&prev);
+ bch2_bkey_buf_init(&cur);
+ bkey_init(&prev.k->k);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
+
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
+ false, &k, true);
+ if (ret)
+ goto fsck_err;
+
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&cur, c, k);
+ k = bkey_i_to_s_c(cur.k);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ ret = bch2_gc_check_topology(c, b,
+ &prev, cur,
+ !bch2_btree_and_journal_iter_peek(&iter).k);
+ if (ret)
+ goto fsck_err;
+ } else {
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+ }
+
+ if (b->c.level > target_depth) {
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ struct btree *child;
+
+ bch2_bkey_buf_reassemble(&cur, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ child = bch2_btree_node_get_noiter(trans, cur.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+ ret = PTR_ERR_OR_ZERO(child);
+
+ if (ret == -EIO) {
+ bch2_topology_error(c);
+
+ if (__fsck_err(c,
+ FSCK_CAN_FIX|
+ FSCK_CAN_IGNORE|
+ FSCK_NO_RATELIMIT,
+ "Unreadable btree node at btree %s level %u:\n"
+ " %s",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level - 1,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
+ should_restart_for_topology_repair(c)) {
+ bch_info(c, "Halting mark and sweep to start topology repair pass");
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+ goto fsck_err;
+ } else {
+ /*
+ * Continue marking when opted to not
+ * fix the error:
+ */
+ ret = 0;
+ set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
+ continue;
+ }
+ } else if (ret) {
+ bch_err_msg(c, ret, "getting btree node");
+ break;
+ }
+
+ ret = bch2_gc_btree_init_recurse(trans, child,
+ target_depth);
+ six_unlock_read(&child->c.lock);
+
+ if (ret)
+ break;
+ }
+ }
+fsck_err:
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
+ bch2_btree_and_journal_iter_exit(&iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int bch2_gc_btree_init(struct btree_trans *trans,
+ enum btree_id btree_id,
+ bool metadata_only)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b;
+ unsigned target_depth = metadata_only ? 1 : 0;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ b = bch2_btree_id_root(c, btree_id)->b;
+
+ if (btree_node_fake(b))
+ return 0;
+
+ six_lock_read(&b->c.lock, NULL, NULL);
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->data->min_key);
+ if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
+ "btree root with incorrect min_key: %s", buf.buf)) {
+ bch_err(c, "repair unimplemented");
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto fsck_err;
+ }
+
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->data->max_key);
+ if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
+ "btree root with incorrect max_key: %s", buf.buf)) {
+ bch_err(c, "repair unimplemented");
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto fsck_err;
+ }
+
+ if (b->c.level >= target_depth)
+ ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
+
+ if (!ret) {
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+
+ ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true,
+ &k, true);
+ }
+fsck_err:
+ six_unlock_read(&b->c.lock);
+
+ if (ret < 0)
+ bch_err_fn(c, ret);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
+{
+ return (int) btree_id_to_gc_phase(l) -
+ (int) btree_id_to_gc_phase(r);
+}
+
+static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ enum btree_id ids[BTREE_ID_NR];
+ unsigned i;
+ int ret = 0;
+
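+ /*
+ * Mark btrees in gc phase order, so that gc_pos advances monotonically
+ * across the whole pass:
+ */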
+ for (i = 0; i < BTREE_ID_NR; i++)
+ ids[i] = i;
+ bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
+
+ for (i = 0; i < BTREE_ID_NR && !ret; i++)
+ ret = initial
+ ? bch2_gc_btree_init(trans, ids[i], metadata_only)
+ : bch2_gc_btree(trans, ids[i], initial, metadata_only);
+
+ for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
+ if (!bch2_btree_id_root(c, i)->alive)
+ continue;
+
+ ret = initial
+ ? bch2_gc_btree_init(trans, i, metadata_only)
+ : bch2_gc_btree(trans, i, initial, metadata_only);
+ }
+
+ if (ret < 0)
+ bch_err_fn(c, ret);
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
+ u64 start, u64 end,
+ enum bch_data_type type,
+ unsigned flags)
+{
+ u64 b = sector_to_bucket(ca, start);
+
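+ /* Mark the range one bucket at a time, clamping the last bucket at end: */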
+ do {
+ unsigned sectors =
+ min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+
+ bch2_mark_metadata_bucket(c, ca, b, type, sectors,
+ gc_phase(GC_PHASE_SB), flags);
+ b++;
+ start += sectors;
+ } while (start < end);
+}
+
+static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
+ unsigned flags)
+{
+ struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+ unsigned i;
+ u64 b;
+
+ for (i = 0; i < layout->nr_superblocks; i++) {
+ u64 offset = le64_to_cpu(layout->sb_offset[i]);
+
+ if (offset == BCH_SB_SECTOR)
+ mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
+ BCH_DATA_sb, flags);
+
+ mark_metadata_sectors(c, ca, offset,
+ offset + (1 << layout->sb_max_size_bits),
+ BCH_DATA_sb, flags);
+ }
+
+ for (i = 0; i < ca->journal.nr; i++) {
+ b = ca->journal.buckets[i];
+ bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
+ ca->mi.bucket_size,
+ gc_phase(GC_PHASE_SB), flags);
+ }
+}
+
+static void bch2_mark_superblocks(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ mutex_lock(&c->sb_lock);
+ gc_pos_set(c, gc_phase(GC_PHASE_SB));
+
+ for_each_online_member(ca, c, i)
+ bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
+ mutex_unlock(&c->sb_lock);
+}
+
+#if 0
+/* Also see bch2_pending_btree_node_free_insert_done() */
+static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
+{
+ struct btree_update *as;
+ struct pending_btree_node_free *d;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
+
+ for_each_pending_btree_node_free(c, as, d)
+ if (d->index_update_done)
+ bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
+
+ mutex_unlock(&c->btree_interior_update_lock);
+}
+#endif
+
+static void bch2_gc_free(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ genradix_free(&c->reflink_gc_table);
+ genradix_free(&c->gc_stripes);
+
+ for_each_member_device(ca, c, i) {
+ kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
+ sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket));
+ ca->buckets_gc = NULL;
+
+ free_percpu(ca->usage_gc);
+ ca->usage_gc = NULL;
+ }
+
+ free_percpu(c->usage_gc);
+ c->usage_gc = NULL;
+}
+
+static int bch2_gc_done(struct bch_fs *c,
+ bool initial, bool metadata_only)
+{
+ struct bch_dev *ca = NULL;
+ struct printbuf buf = PRINTBUF;
+ bool verify = !metadata_only &&
+ !c->opts.reconstruct_alloc &&
+ (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
+ unsigned i, dev;
+ int ret = 0;
+
+ percpu_down_write(&c->mark_lock);
+
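+ /*
+ * Compare each accounting field against what gc just recomputed; on a
+ * mismatch, optionally report it via fsck_err() and take gc's value as
+ * authoritative:
+ */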
+#define copy_field(_f, _msg, ...) \
+ if (dst->_f != src->_f && \
+ (!verify || \
+ fsck_err(c, _msg ": got %llu, should be %llu" \
+ , ##__VA_ARGS__, dst->_f, src->_f))) \
+ dst->_f = src->_f
+#define copy_dev_field(_f, _msg, ...) \
+ copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+#define copy_fs_field(_f, _msg, ...) \
+ copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
+
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ bch2_fs_usage_acc_to_base(c, i);
+
+ for_each_member_device(ca, c, dev) {
+ struct bch_dev_usage *dst = ca->usage_base;
+ struct bch_dev_usage *src = (void *)
+ bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
+ dev_usage_u64s());
+
+ copy_dev_field(buckets_ec, "buckets_ec");
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]);
+ copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]);
+ copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+ }
+ }
+
+ {
+ unsigned nr = fs_usage_u64s(c);
+ struct bch_fs_usage *dst = c->usage_base;
+ struct bch_fs_usage *src = (void *)
+ bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
+
+ copy_fs_field(hidden, "hidden");
+ copy_fs_field(btree, "btree");
+
+ if (!metadata_only) {
+ copy_fs_field(data, "data");
+ copy_fs_field(cached, "cached");
+ copy_fs_field(reserved, "reserved");
+			copy_fs_field(nr_inodes, "nr_inodes");
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(persistent_reserved[i],
+ "persistent_reserved[%i]", i);
+ }
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+
+ if (metadata_only &&
+ (e->data_type == BCH_DATA_user ||
+ e->data_type == BCH_DATA_cached))
+ continue;
+
+ printbuf_reset(&buf);
+ bch2_replicas_entry_to_text(&buf, e);
+
+ copy_fs_field(replicas[i], "%s", buf.buf);
+ }
+ }
+
+#undef copy_fs_field
+#undef copy_dev_field
+#undef copy_stripe_field
+#undef copy_field
+fsck_err:
+ if (ca)
+ percpu_ref_put(&ca->ref);
+ if (ret)
+ bch_err_fn(c, ret);
+
+ percpu_up_write(&c->mark_lock);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int bch2_gc_start(struct bch_fs *c)
+{
+ struct bch_dev *ca = NULL;
+ unsigned i;
+
+ BUG_ON(c->usage_gc);
+
+ c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+ sizeof(u64), GFP_KERNEL);
+ if (!c->usage_gc) {
+ bch_err(c, "error allocating c->usage_gc");
+ return -BCH_ERR_ENOMEM_gc_start;
+ }
+
+ for_each_member_device(ca, c, i) {
+ BUG_ON(ca->usage_gc);
+
+ ca->usage_gc = alloc_percpu(struct bch_dev_usage);
+ if (!ca->usage_gc) {
+ bch_err(c, "error allocating ca->usage_gc");
+ percpu_ref_put(&ca->ref);
+ return -BCH_ERR_ENOMEM_gc_start;
+ }
+
+ this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
+ ca->mi.nbuckets - ca->mi.first_bucket);
+ }
+
+ return 0;
+}
+
+static int bch2_gc_reset(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_member_device(ca, c, i) {
+ free_percpu(ca->usage_gc);
+ ca->usage_gc = NULL;
+ }
+
+ free_percpu(c->usage_gc);
+ c->usage_gc = NULL;
+
+ return bch2_gc_start(c);
+}
+
+/* returns true if not equal */
+static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
+ struct bch_alloc_v4 r)
+{
+ return l.gen != r.gen ||
+ l.oldest_gen != r.oldest_gen ||
+ l.data_type != r.data_type ||
+ l.dirty_sectors != r.dirty_sectors ||
+ l.cached_sectors != r.cached_sectors ||
+ l.stripe_redundancy != r.stripe_redundancy ||
+ l.stripe != r.stripe;
+}
+
+static int bch2_alloc_write_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ bool metadata_only)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
+ struct bucket gc, *b;
+ struct bkey_i_alloc_v4 *a;
+ struct bch_alloc_v4 old_convert, new;
+ const struct bch_alloc_v4 *old;
+ enum bch_data_type type;
+ int ret;
+
+ if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
+ return 1;
+
+ old = bch2_alloc_to_v4(k, &old_convert);
+ new = *old;
+
+ percpu_down_read(&c->mark_lock);
+ b = gc_bucket(ca, iter->pos.offset);
+
+ /*
+ * b->data_type doesn't yet include need_discard & need_gc_gen states -
+ * fix that here:
+ */
+ type = __alloc_data_type(b->dirty_sectors,
+ b->cached_sectors,
+ b->stripe,
+ *old,
+ b->data_type);
+ if (b->data_type != type) {
+ struct bch_dev_usage *u;
+
+ preempt_disable();
+ u = this_cpu_ptr(ca->usage_gc);
+ u->d[b->data_type].buckets--;
+ b->data_type = type;
+ u->d[b->data_type].buckets++;
+ preempt_enable();
+ }
+
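+	/* Snapshot the gc bucket state under mark_lock; it's compared below: */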
+ gc = *b;
+ percpu_up_read(&c->mark_lock);
+
+ if (metadata_only &&
+ gc.data_type != BCH_DATA_sb &&
+ gc.data_type != BCH_DATA_journal &&
+ gc.data_type != BCH_DATA_btree)
+ return 0;
+
+ if (gen_after(old->gen, gc.gen))
+ return 0;
+
+ if (c->opts.reconstruct_alloc ||
+ fsck_err_on(new.data_type != gc.data_type, c,
+ "bucket %llu:%llu gen %u has wrong data_type"
+ ": got %s, should be %s",
+ iter->pos.inode, iter->pos.offset,
+ gc.gen,
+ bch2_data_types[new.data_type],
+ bch2_data_types[gc.data_type]))
+ new.data_type = gc.data_type;
+
+#define copy_bucket_field(_f) \
+ if (c->opts.reconstruct_alloc || \
+ fsck_err_on(new._f != gc._f, c, \
+ "bucket %llu:%llu gen %u data type %s has wrong " #_f \
+ ": got %u, should be %u", \
+ iter->pos.inode, iter->pos.offset, \
+ gc.gen, \
+ bch2_data_types[gc.data_type], \
+ new._f, gc._f)) \
+		new._f = gc._f;
+
+ copy_bucket_field(gen);
+ copy_bucket_field(dirty_sectors);
+ copy_bucket_field(cached_sectors);
+ copy_bucket_field(stripe_redundancy);
+ copy_bucket_field(stripe);
+#undef copy_bucket_field
+
+ if (!bch2_alloc_v4_cmp(*old, new))
+ return 0;
+
+ a = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ret;
+
+ a->v = new;
+
+ /*
+ * The trigger normally makes sure this is set, but we're not running
+ * triggers:
+ */
+ if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
+ a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
+ ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
+fsck_err:
+ return ret;
+}
+
+static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_dev *ca;
+ unsigned i;
+ int ret = 0;
+
+ for_each_member_device(ca, c, i) {
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+ POS(ca->dev_idx, ca->mi.first_bucket),
+ BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW,
+ bch2_alloc_write_key(trans, &iter, k, metadata_only));
+
+ if (ret < 0) {
+ bch_err_fn(c, ret);
+ percpu_ref_put(&ca->ref);
+ break;
+ }
+ }
+
+ bch2_trans_put(trans);
+ return ret < 0 ? ret : 0;
+}
+
+static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
+{
+ struct bch_dev *ca;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bucket *g;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ unsigned i;
+ int ret;
+
+ for_each_member_device(ca, c, i) {
+ struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!buckets) {
+ percpu_ref_put(&ca->ref);
+ bch_err(c, "error allocating ca->buckets[gc]");
+ ret = -BCH_ERR_ENOMEM_gc_alloc_start;
+ goto err;
+ }
+
+ buckets->first_bucket = ca->mi.first_bucket;
+ buckets->nbuckets = ca->mi.nbuckets;
+ rcu_assign_pointer(ca->buckets_gc, buckets);
+ }
+
+ for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ g = gc_bucket(ca, k.k->p.offset);
+
+ a = bch2_alloc_to_v4(k, &a_convert);
+
+ g->gen_valid = 1;
+ g->gen = a->gen;
+
+ if (metadata_only &&
+ (a->data_type == BCH_DATA_user ||
+ a->data_type == BCH_DATA_cached ||
+ a->data_type == BCH_DATA_parity)) {
+ g->data_type = a->data_type;
+ g->dirty_sectors = a->dirty_sectors;
+ g->cached_sectors = a->cached_sectors;
+ g->stripe = a->stripe;
+ g->stripe_redundancy = a->stripe_redundancy;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ bch2_trans_put(trans);
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_member_device(ca, c, i) {
+ struct bucket_array *buckets = gc_bucket_array(ca);
+ struct bucket *g;
+
+ for_each_bucket(g, buckets) {
+ if (metadata_only &&
+ (g->data_type == BCH_DATA_user ||
+ g->data_type == BCH_DATA_cached ||
+ g->data_type == BCH_DATA_parity))
+ continue;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ }
+ }
+}
+
+static int bch2_gc_write_reflink_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ size_t *idx)
+{
+ struct bch_fs *c = trans->c;
+ const __le64 *refcount = bkey_refcount_c(k);
+ struct printbuf buf = PRINTBUF;
+ struct reflink_gc *r;
+ int ret = 0;
+
+ if (!refcount)
+ return 0;
+
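+	/*
+	 * Entries in reflink_gc_table were created in key order at gc start,
+	 * so we can walk it in lockstep with the btree iterator:
+	 */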
+ while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
+ r->offset < k.k->p.offset)
+ ++*idx;
+
+ if (!r ||
+ r->offset != k.k->p.offset ||
+ r->size != k.k->size) {
+ bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
+ return -EINVAL;
+ }
+
+ if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+ "reflink key has wrong refcount:\n"
+ " %s\n"
+ " should be %u",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
+ r->refcount)) {
+ struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
+
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
+
+ if (!r->refcount)
+ new->k.type = KEY_TYPE_deleted;
+ else
+ *bkey_refcount(new) = cpu_to_le64(r->refcount);
+ }
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ size_t idx = 0;
+ int ret = 0;
+
+ if (metadata_only)
+ return 0;
+
+ trans = bch2_trans_get(c);
+
+ ret = for_each_btree_key_commit(trans, iter,
+ BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_gc_write_reflink_key(trans, &iter, k, &idx));
+
+ c->reflink_gc_nr = 0;
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int bch2_gc_reflink_start(struct bch_fs *c,
+ bool metadata_only)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct reflink_gc *r;
+ int ret = 0;
+
+ if (metadata_only)
+ return 0;
+
+ trans = bch2_trans_get(c);
+ c->reflink_gc_nr = 0;
+
+ for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ const __le64 *refcount = bkey_refcount_c(k);
+
+ if (!refcount)
+ continue;
+
+ r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
+ GFP_KERNEL);
+ if (!r) {
+ ret = -BCH_ERR_ENOMEM_gc_reflink_start;
+ break;
+ }
+
+ r->offset = k.k->p.offset;
+ r->size = k.k->size;
+ r->refcount = 0;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
+{
+ struct genradix_iter iter;
+ struct reflink_gc *r;
+
+ genradix_for_each(&c->reflink_gc_table, iter, r)
+ r->refcount = 0;
+}
+
+static int bch2_gc_write_stripes_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ const struct bch_stripe *s;
+ struct gc_stripe *m;
+ bool bad = false;
+ unsigned i;
+ int ret = 0;
+
+ if (k.k->type != KEY_TYPE_stripe)
+ return 0;
+
+ s = bkey_s_c_to_stripe(k).v;
+ m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
+
+ for (i = 0; i < s->nr_blocks; i++) {
+ u32 old = stripe_blockcount_get(s, i);
+ u32 new = (m ? m->block_sectors[i] : 0);
+
+ if (old != new) {
+ prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
+ i, old, new);
+ bad = true;
+ }
+ }
+
+ if (bad)
+ bch2_bkey_val_to_text(&buf, c, k);
+
+ if (fsck_err_on(bad, c, "%s", buf.buf)) {
+ struct bkey_i_stripe *new;
+
+ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
+
+ bkey_reassemble(&new->k_i, k);
+
+ for (i = 0; i < new->v.nr_blocks; i++)
+ stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
+
+ ret = bch2_trans_update(trans, iter, &new->k_i, 0);
+ }
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ if (metadata_only)
+ return 0;
+
+ trans = bch2_trans_get(c);
+
+ ret = for_each_btree_key_commit(trans, iter,
+ BTREE_ID_stripes, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_gc_write_stripes_key(trans, &iter, k));
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
+{
+ genradix_free(&c->gc_stripes);
+}
+
+/**
+ * bch2_gc - walk _all_ references to buckets, and recompute them:
+ *
+ * @c: filesystem object
+ * @initial: are we in recovery?
+ * @metadata_only: are we just checking metadata references, or everything?
+ *
+ * Returns: 0 on success, or standard errcode on failure
+ *
+ * Order matters here:
+ * - Concurrent GC relies on the fact that we have a total ordering for
+ * everything that GC walks - see gc_will_visit_node(),
+ * gc_will_visit_root()
+ *
+ * - also, references move around in the course of index updates and
+ * various other crap: everything needs to agree on the ordering
+ * references are allowed to move around in - e.g., we're allowed to
+ * start with a reference owned by an open_bucket (the allocator) and
+ * move it to the btree, but not the reverse.
+ *
+ * This is necessary to ensure that gc doesn't miss references that
+ * move around - if references move backwards in the ordering GC
+ * uses, GC could skip past them
+ */
+int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+{
+ unsigned iter = 0;
+ int ret;
+
+ lockdep_assert_held(&c->state_lock);
+
+ down_write(&c->gc_lock);
+
+ bch2_btree_interior_updates_flush(c);
+
+ ret = bch2_gc_start(c) ?:
+ bch2_gc_alloc_start(c, metadata_only) ?:
+ bch2_gc_reflink_start(c, metadata_only);
+ if (ret)
+ goto out;
+again:
+ gc_pos_set(c, gc_phase(GC_PHASE_START));
+
+ bch2_mark_superblocks(c);
+
+ ret = bch2_gc_btrees(c, initial, metadata_only);
+
+ if (ret)
+ goto out;
+
+#if 0
+ bch2_mark_pending_btree_node_frees(c);
+#endif
+ c->gc_count++;
+
+ if (test_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags) ||
+ (!iter && bch2_test_restart_gc)) {
+ if (iter++ > 2) {
+ bch_info(c, "Unable to fix bucket gens, looping");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * XXX: make sure gens we fixed got saved
+ */
+ bch_info(c, "Second GC pass needed, restarting:");
+ clear_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+
+ bch2_gc_stripes_reset(c, metadata_only);
+ bch2_gc_alloc_reset(c, metadata_only);
+ bch2_gc_reflink_reset(c, metadata_only);
+ ret = bch2_gc_reset(c);
+ if (ret)
+ goto out;
+
+ /* flush fsck errors, reset counters */
+ bch2_flush_fsck_errs(c);
+ goto again;
+ }
+out:
+ if (!ret) {
+ bch2_journal_block(&c->journal);
+
+ ret = bch2_gc_stripes_done(c, metadata_only) ?:
+ bch2_gc_reflink_done(c, metadata_only) ?:
+ bch2_gc_alloc_done(c, metadata_only) ?:
+ bch2_gc_done(c, initial, metadata_only);
+
+ bch2_journal_unblock(&c->journal);
+ }
+
+ percpu_down_write(&c->mark_lock);
+ /* Indicates that gc is no longer in progress: */
+ __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
+
+ bch2_gc_free(c);
+ percpu_up_write(&c->mark_lock);
+
+ up_write(&c->gc_lock);
+
+ /*
+ * At startup, allocations can happen directly instead of via the
+ * allocator thread - issue wakeup in case they blocked on gc_lock:
+ */
+ closure_wake_up(&c->freelist_wait);
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int gc_btree_gens_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+ struct bkey_i *u;
+ int ret;
+
+ percpu_down_read(&c->mark_lock);
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
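+		/*
+		 * If any pointer is more than 16 gens stale, rewrite the key:
+		 * bch2_extent_normalize() below drops stale pointers, and the
+		 * threshold avoids churn from rewriting keys whose pointers
+		 * are only marginally stale:
+		 */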
+ if (ptr_stale(ca, ptr) > 16) {
+ percpu_up_read(&c->mark_lock);
+ goto update;
+ }
+ }
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
+
+ if (gen_after(*gen, ptr->gen))
+ *gen = ptr->gen;
+ }
+ percpu_up_read(&c->mark_lock);
+ return 0;
+update:
+ u = bch2_bkey_make_mut(trans, iter, &k, 0);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ return ret;
+
+ bch2_extent_normalize(c, bkey_i_to_s(u));
+ return 0;
+}
+
+static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+ struct bkey_i_alloc_v4 *a_mut;
+ int ret;
+
+ if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
+ return 0;
+
+ a_mut = bch2_alloc_to_v4_mut(trans, k);
+ ret = PTR_ERR_OR_ZERO(a_mut);
+ if (ret)
+ return ret;
+
+ a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
+ a_mut->v.data_type = alloc_data_type(a_mut->v, a_mut->v.data_type);
+
+ return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
+}
+
+int bch2_gc_gens(struct bch_fs *c)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_dev *ca;
+ u64 b, start_time = local_clock();
+ unsigned i;
+ int ret;
+
+ /*
+ * Ideally we would be using state_lock and not gc_lock here, but that
+ * introduces a deadlock in the RO path - we currently take the state
+ * lock at the start of going RO, thus the gc thread may get stuck:
+ */
+ if (!mutex_trylock(&c->gc_gens_lock))
+ return 0;
+
+ trace_and_count(c, gc_gens_start, c);
+ down_read(&c->gc_lock);
+ trans = bch2_trans_get(c);
+
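+	/*
+	 * oldest_gen is one u8 per bucket: seed it with the current bucket
+	 * gens, then walk every pointer and lower each entry to the oldest
+	 * gen of any live pointer into that bucket:
+	 */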
+ for_each_member_device(ca, c, i) {
+ struct bucket_gens *gens;
+
+ BUG_ON(ca->oldest_gen);
+
+ ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
+ if (!ca->oldest_gen) {
+ percpu_ref_put(&ca->ref);
+ ret = -BCH_ERR_ENOMEM_gc_gens;
+ goto err;
+ }
+
+ gens = bucket_gens(ca);
+
+ for (b = gens->first_bucket;
+ b < gens->nbuckets; b++)
+ ca->oldest_gen[b] = gens->b[b];
+ }
+
+ for (i = 0; i < BTREE_ID_NR; i++)
+ if (btree_type_has_ptrs(i)) {
+ c->gc_gens_btree = i;
+ c->gc_gens_pos = POS_MIN;
+
+ ret = for_each_btree_key_commit(trans, iter, i,
+ POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ k,
+ NULL, NULL,
+ BTREE_INSERT_NOFAIL,
+ gc_btree_gens_key(trans, &iter, k));
+ if (ret && !bch2_err_matches(ret, EROFS))
+ bch_err_fn(c, ret);
+ if (ret)
+ goto err;
+ }
+
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+ POS_MIN,
+ BTREE_ITER_PREFETCH,
+ k,
+ NULL, NULL,
+ BTREE_INSERT_NOFAIL,
+ bch2_alloc_write_oldest_gen(trans, &iter, k));
+ if (ret && !bch2_err_matches(ret, EROFS))
+ bch_err_fn(c, ret);
+ if (ret)
+ goto err;
+
+ c->gc_gens_btree = 0;
+ c->gc_gens_pos = POS_MIN;
+
+ c->gc_count++;
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
+ trace_and_count(c, gc_gens_end, c);
+err:
+ for_each_member_device(ca, c, i) {
+ kvfree(ca->oldest_gen);
+ ca->oldest_gen = NULL;
+ }
+
+ bch2_trans_put(trans);
+ up_read(&c->gc_lock);
+ mutex_unlock(&c->gc_gens_lock);
+ return ret;
+}
+
+static int bch2_gc_thread(void *arg)
+{
+ struct bch_fs *c = arg;
+ struct io_clock *clock = &c->io_clock[WRITE];
+ unsigned long last = atomic64_read(&clock->now);
+ unsigned last_kick = atomic_read(&c->kick_gc);
+ int ret;
+
+ set_freezable();
+
+ while (1) {
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+
+ if (atomic_read(&c->kick_gc) != last_kick)
+ break;
+
+ if (c->btree_gc_periodic) {
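+				/*
+				 * Periodic gc runs after roughly 1/16th of
+				 * the device's capacity has been written,
+				 * measured on the write io clock:
+				 */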
+ unsigned long next = last + c->capacity / 16;
+
+ if (atomic64_read(&clock->now) >= next)
+ break;
+
+ bch2_io_clock_schedule_timeout(clock, next);
+ } else {
+ schedule();
+ }
+
+ try_to_freeze();
+ }
+ __set_current_state(TASK_RUNNING);
+
+ last = atomic64_read(&clock->now);
+ last_kick = atomic_read(&c->kick_gc);
+
+ /*
+ * Full gc is currently incompatible with btree key cache:
+ */
+#if 0
+ ret = bch2_gc(c, false, false);
+#else
+ ret = bch2_gc_gens(c);
+#endif
+ if (ret < 0)
+ bch_err_fn(c, ret);
+
+ debug_check_no_locks_held();
+ }
+
+ return 0;
+}
+
+void bch2_gc_thread_stop(struct bch_fs *c)
+{
+ struct task_struct *p;
+
+ p = c->gc_thread;
+ c->gc_thread = NULL;
+
+ if (p) {
+ kthread_stop(p);
+ put_task_struct(p);
+ }
+}
+
+int bch2_gc_thread_start(struct bch_fs *c)
+{
+ struct task_struct *p;
+
+ if (c->gc_thread)
+ return 0;
+
+ p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
+ if (IS_ERR(p)) {
+ bch_err_fn(c, PTR_ERR(p));
+ return PTR_ERR(p);
+ }
+
+ get_task_struct(p);
+ c->gc_thread = p;
+ wake_up_process(p);
+ return 0;
+}
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
new file mode 100644
index 000000000000..607575f83a00
--- /dev/null
+++ b/fs/bcachefs/btree_gc.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_GC_H
+#define _BCACHEFS_BTREE_GC_H
+
+#include "bkey.h"
+#include "btree_types.h"
+
+int bch2_check_topology(struct bch_fs *);
+int bch2_gc(struct bch_fs *, bool, bool);
+int bch2_gc_gens(struct bch_fs *);
+void bch2_gc_thread_stop(struct bch_fs *);
+int bch2_gc_thread_start(struct bch_fs *);
+
+/*
+ * For concurrent mark and sweep (with other index updates), we define a total
+ * ordering of _all_ references GC walks:
+ *
+ * Note that some references will have the same GC position as others - e.g.
+ * everything within the same btree node; in those cases we're relying on
+ * whatever locking exists for where those references live, i.e. the write lock
+ * on a btree node.
+ *
+ * That locking is also required to ensure GC doesn't pass the updater in
+ * between the updater adding/removing the reference and updating the GC marks;
+ * without that, we would at best double count sometimes.
+ *
+ * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
+ * be held that prevents GC from passing the position the updater is at.
+ *
+ * (What about the start of gc, when we're clearing all the marks? GC clears the
+ * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
+ * position inside its cmpxchg loop, so crap magically works).
+ */
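+
+/*
+ * Illustrative sketch (not part of this interface): an updater that just
+ * added a reference would typically check whether gc already walked past
+ * it, and if so also update the gc-mode marks, e.g.
+ *
+ *	if (gc_visited(c, gc_pos_btree_node(b)))
+ *		update the gc marks too, since gc won't revisit b
+ *
+ * with the btree node write lock held, so gc can't pass us concurrently.
+ */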
+
+/* Position of (the start of) a gc phase: */
+static inline struct gc_pos gc_phase(enum gc_phase phase)
+{
+ return (struct gc_pos) {
+ .phase = phase,
+ .pos = POS_MIN,
+ .level = 0,
+ };
+}
+
+static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
+{
+ return cmp_int(l.phase, r.phase) ?:
+ bpos_cmp(l.pos, r.pos) ?:
+ cmp_int(l.level, r.level);
+}
+
+static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
+{
+ switch (id) {
+#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
+ BCH_BTREE_IDS()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+static inline struct gc_pos gc_pos_btree(enum btree_id id,
+ struct bpos pos, unsigned level)
+{
+ return (struct gc_pos) {
+ .phase = btree_id_to_gc_phase(id),
+ .pos = pos,
+ .level = level,
+ };
+}
+
+/*
+ * GC position of the pointers within a btree node: note, _not_ for &b->key
+ * itself, that lives in the parent node:
+ */
+static inline struct gc_pos gc_pos_btree_node(struct btree *b)
+{
+ return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
+}
+
+/*
+ * GC position of the pointer to a btree root: we don't use
+ * gc_pos_btree_node() here to avoid a potential race with
+ * btree_split() increasing the tree depth - the new root will have level > the
+ * old root and thus have a greater gc position than the old root, but that
+ * would be incorrect since once gc has marked the root it's not coming back.
+ */
+static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
+{
+ return gc_pos_btree(id, SPOS_MAX, BTREE_MAX_DEPTH);
+}
+
+static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
+{
+ unsigned seq;
+ bool ret;
+
+ do {
+ seq = read_seqcount_begin(&c->gc_pos_lock);
+ ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
+ } while (read_seqcount_retry(&c->gc_pos_lock, seq));
+
+ return ret;
+}
+
+static inline void bch2_do_gc_gens(struct bch_fs *c)
+{
+ atomic_inc(&c->kick_gc);
+ if (c->gc_thread)
+ wake_up_process(c->gc_thread);
+}
+
+#endif /* _BCACHEFS_BTREE_GC_H */
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
new file mode 100644
index 000000000000..a869cf6ac7c6
--- /dev/null
+++ b/fs/bcachefs/btree_io.c
@@ -0,0 +1,2223 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "bkey_sort.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "debug.h"
+#include "error.h"
+#include "extents.h"
+#include "io_write.h"
+#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
+#include "recovery.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/sched/mm.h>
+
+void bch2_btree_node_io_unlock(struct btree *b)
+{
+ EBUG_ON(!btree_node_write_in_flight(b));
+
+ clear_btree_node_write_in_flight_inner(b);
+ clear_btree_node_write_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+}
+
+void bch2_btree_node_io_lock(struct btree *b)
+{
+ bch2_assert_btree_nodes_not_locked();
+
+ wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_read(struct btree *b)
+{
+ wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void __bch2_btree_node_wait_on_write(struct btree *b)
+{
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_read(struct btree *b)
+{
+ bch2_assert_btree_nodes_not_locked();
+
+ wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+void bch2_btree_node_wait_on_write(struct btree *b)
+{
+ bch2_assert_btree_nodes_not_locked();
+
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static void verify_no_dups(struct btree *b,
+ struct bkey_packed *start,
+ struct bkey_packed *end)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct bkey_packed *k, *p;
+
+ if (start == end)
+ return;
+
+ for (p = start, k = bkey_p_next(start);
+ k != end;
+ p = k, k = bkey_p_next(k)) {
+ struct bkey l = bkey_unpack_key(b, p);
+ struct bkey r = bkey_unpack_key(b, k);
+
+ BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
+ }
+#endif
+}
+
+static void set_needs_whiteout(struct bset *i, int v)
+{
+ struct bkey_packed *k;
+
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+ k->needs_whiteout = v;
+}
+
+static void btree_bounce_free(struct bch_fs *c, size_t size,
+ bool used_mempool, void *p)
+{
+ if (used_mempool)
+ mempool_free(p, &c->btree_bounce_pool);
+ else
+ vpfree(p, size);
+}
+
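+/*
+ * Try a cheap non-blocking allocation first; fall back to the bounce
+ * mempool (which may block, but is guaranteed to eventually succeed), all
+ * under memalloc_nofs so we can't recurse into filesystem reclaim:
+ */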
+static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
+ bool *used_mempool)
+{
+ unsigned flags = memalloc_nofs_save();
+ void *p;
+
+ BUG_ON(size > btree_bytes(c));
+
+ *used_mempool = false;
+ p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+ if (!p) {
+ *used_mempool = true;
+ p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
+ }
+ memalloc_nofs_restore(flags);
+ return p;
+}
+
+static void sort_bkey_ptrs(const struct btree *bt,
+ struct bkey_packed **ptrs, unsigned nr)
+{
+ unsigned n = nr, a = nr / 2, b, c, d;
+
+ if (!a)
+ return;
+
+ /* Heap sort: see lib/sort.c: */
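+	/*
+	 * First phase (while a > 0) sifts elements down to build the heap;
+	 * second phase repeatedly swaps the max to the end and restores the
+	 * heap property. The inner loops walk down to a leaf, back up to
+	 * where the sifted element belongs, then rotate it into place:
+	 */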
+ while (1) {
+ if (a)
+ a--;
+ else if (--n)
+ swap(ptrs[0], ptrs[n]);
+ else
+ break;
+
+ for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
+ b = bch2_bkey_cmp_packed(bt,
+ ptrs[c],
+ ptrs[d]) >= 0 ? c : d;
+ if (d == n)
+ b = c;
+
+ while (b != a &&
+ bch2_bkey_cmp_packed(bt,
+ ptrs[a],
+ ptrs[b]) >= 0)
+ b = (b - 1) / 2;
+ c = b;
+ while (b != a) {
+ b = (b - 1) / 2;
+ swap(ptrs[b], ptrs[c]);
+ }
+ }
+}
+
+static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
+{
+ struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
+ bool used_mempool = false;
+ size_t bytes = b->whiteout_u64s * sizeof(u64);
+
+ if (!b->whiteout_u64s)
+ return;
+
+ new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
+
+ ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
+
+ for (k = unwritten_whiteouts_start(c, b);
+ k != unwritten_whiteouts_end(c, b);
+ k = bkey_p_next(k))
+ *--ptrs = k;
+
+ sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
+
+ k = new_whiteouts;
+
+ while (ptrs != ptrs_end) {
+ bkey_copy(k, *ptrs);
+ k = bkey_p_next(k);
+ ptrs++;
+ }
+
+ verify_no_dups(b, new_whiteouts,
+ (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
+
+ memcpy_u64s(unwritten_whiteouts_start(c, b),
+ new_whiteouts, b->whiteout_u64s);
+
+ btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
+}
+
+static bool should_compact_bset(struct btree *b, struct bset_tree *t,
+ bool compacting, enum compact_mode mode)
+{
+ if (!bset_dead_u64s(b, t))
+ return false;
+
+ switch (mode) {
+ case COMPACT_LAZY:
+ return should_compact_bset_lazy(b, t) ||
+ (compacting && !bset_written(b, bset(b, t)));
+ case COMPACT_ALL:
+ return true;
+ default:
+ BUG();
+ }
+}
+
+static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
+{
+ struct bset_tree *t;
+ bool ret = false;
+
+ for_each_bset(b, t) {
+ struct bset *i = bset(b, t);
+ struct bkey_packed *k, *n, *out, *start, *end;
+ struct btree_node_entry *src = NULL, *dst = NULL;
+
+ if (t != b->set && !bset_written(b, i)) {
+ src = container_of(i, struct btree_node_entry, keys);
+ dst = max(write_block(b),
+ (void *) btree_bkey_last(b, t - 1));
+ }
+
+ if (src != dst)
+ ret = true;
+
+ if (!should_compact_bset(b, t, ret, mode)) {
+ if (src != dst) {
+ memmove(dst, src, sizeof(*src) +
+ le16_to_cpu(src->keys.u64s) *
+ sizeof(u64));
+ i = &dst->keys;
+ set_btree_bset(b, t, i);
+ }
+ continue;
+ }
+
+ start = btree_bkey_first(b, t);
+ end = btree_bkey_last(b, t);
+
+ if (src != dst) {
+ memmove(dst, src, sizeof(*src));
+ i = &dst->keys;
+ set_btree_bset(b, t, i);
+ }
+
+ out = i->start;
+
+ for (k = start; k != end; k = n) {
+ n = bkey_p_next(k);
+
+ if (!bkey_deleted(k)) {
+ bkey_copy(out, k);
+ out = bkey_p_next(out);
+ } else {
+ BUG_ON(k->needs_whiteout);
+ }
+ }
+
+ i->u64s = cpu_to_le16((u64 *) out - i->_data);
+ set_btree_bset_end(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
+ ret = true;
+ }
+
+ bch2_verify_btree_nr_keys(b);
+
+ bch2_btree_build_aux_trees(b);
+
+ return ret;
+}
+
+bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
+ enum compact_mode mode)
+{
+ return bch2_drop_whiteouts(b, mode);
+}
+
+static void btree_node_sort(struct bch_fs *c, struct btree *b,
+ unsigned start_idx,
+ unsigned end_idx,
+ bool filter_whiteouts)
+{
+ struct btree_node *out;
+ struct sort_iter_stack sort_iter;
+ struct bset_tree *t;
+ struct bset *start_bset = bset(b, &b->set[start_idx]);
+ bool used_mempool = false;
+ u64 start_time, seq = 0;
+ unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
+ bool sorting_entire_node = start_idx == 0 &&
+ end_idx == b->nsets;
+
+ sort_iter_stack_init(&sort_iter, b);
+
+ for (t = b->set + start_idx;
+ t < b->set + end_idx;
+ t++) {
+ u64s += le16_to_cpu(bset(b, t)->u64s);
+ sort_iter_add(&sort_iter.iter,
+ btree_bkey_first(b, t),
+ btree_bkey_last(b, t));
+ }
+
+ bytes = sorting_entire_node
+ ? btree_bytes(c)
+ : __vstruct_bytes(struct btree_node, u64s);
+
+ out = btree_bounce_alloc(c, bytes, &used_mempool);
+
+ start_time = local_clock();
+
+ u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
+
+ out->keys.u64s = cpu_to_le16(u64s);
+
+ BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
+
+ if (sorting_entire_node)
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+ start_time);
+
+ /* Make sure we preserve bset journal_seq: */
+ for (t = b->set + start_idx; t < b->set + end_idx; t++)
+ seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
+ start_bset->journal_seq = cpu_to_le64(seq);
+
+ if (sorting_entire_node) {
+ u64s = le16_to_cpu(out->keys.u64s);
+
+ BUG_ON(bytes != btree_bytes(c));
+
+ /*
+ * Our temporary buffer is the same size as the btree node's
+ * buffer, we can just swap buffers instead of doing a big
+ * memcpy()
+ */
+ *out = *b->data;
+ out->keys.u64s = cpu_to_le16(u64s);
+ swap(out, b->data);
+ set_btree_bset(b, b->set, &b->data->keys);
+ } else {
+ start_bset->u64s = out->keys.u64s;
+ memcpy_u64s(start_bset->start,
+ out->keys.start,
+ le16_to_cpu(out->keys.u64s));
+ }
+
+ for (i = start_idx + 1; i < end_idx; i++)
+ b->nr.bset_u64s[start_idx] +=
+ b->nr.bset_u64s[i];
+
+ b->nsets -= shift;
+
+ for (i = start_idx + 1; i < b->nsets; i++) {
+ b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
+ b->set[i] = b->set[i + shift];
+ }
+
+ for (i = b->nsets; i < MAX_BSETS; i++)
+ b->nr.bset_u64s[i] = 0;
+
+ set_btree_bset_end(b, &b->set[start_idx]);
+ bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
+
+ btree_bounce_free(c, bytes, used_mempool, out);
+
+ bch2_verify_btree_nr_keys(b);
+}
+
+void bch2_btree_sort_into(struct bch_fs *c,
+ struct btree *dst,
+ struct btree *src)
+{
+ struct btree_nr_keys nr;
+ struct btree_node_iter src_iter;
+ u64 start_time = local_clock();
+
+ BUG_ON(dst->nsets != 1);
+
+ bch2_bset_set_no_aux_tree(dst, dst->set);
+
+ bch2_btree_node_iter_init_from_start(&src_iter, src);
+
+ nr = bch2_sort_repack(btree_bset_first(dst),
+ src, &src_iter,
+ &dst->format,
+ true);
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
+ start_time);
+
+ set_btree_bset_end(dst, dst->set);
+
+ dst->nr.live_u64s += nr.live_u64s;
+ dst->nr.bset_u64s[0] += nr.bset_u64s[0];
+ dst->nr.packed_keys += nr.packed_keys;
+ dst->nr.unpacked_keys += nr.unpacked_keys;
+
+ bch2_verify_btree_nr_keys(dst);
+}
+
+/*
+ * We're about to add another bset to the btree node, so if there's currently
+ * too many bsets - sort some of them together:
+ */
+static bool btree_node_compact(struct bch_fs *c, struct btree *b)
+{
+ unsigned unwritten_idx;
+ bool ret = false;
+
+ for (unwritten_idx = 0;
+ unwritten_idx < b->nsets;
+ unwritten_idx++)
+ if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
+ break;
+
+ if (b->nsets - unwritten_idx > 1) {
+ btree_node_sort(c, b, unwritten_idx,
+ b->nsets, false);
+ ret = true;
+ }
+
+ if (unwritten_idx > 1) {
+ btree_node_sort(c, b, 0, unwritten_idx, false);
+ ret = true;
+ }
+
+ return ret;
+}
+
+void bch2_btree_build_aux_trees(struct btree *b)
+{
+ struct bset_tree *t;
+
+ for_each_bset(b, t)
+ bch2_bset_build_aux_tree(b, t,
+ !bset_written(b, bset(b, t)) &&
+ t == bset_tree_last(b));
+}
+
+/*
+ * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
+ *
+ * The first bset is going to be of similar order to the size of the node, the
+ * last bset is bounded by btree_write_set_buffer(), which is set to keep the
+ * memmove on insert from being too expensive: the middle bset should, ideally,
+ * be the geometric mean of the first and the last.
+ *
+ * Returns true if the middle bset is greater than that geometric mean:
+ */
+static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
+{
+ unsigned mid_u64s_bits =
+ (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
+
+ return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
+}
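+
+/*
+ * Worked example with hypothetical numbers: if btree_max_u64s(c) were ~2^15
+ * and BTREE_WRITE_SET_U64s_BITS were 9, mid_u64s_bits would be
+ * (15 + 9) / 2 = 12, so a middle bset bigger than 2^12 u64s - the geometric
+ * mean of the two bounds - triggers the full compact.
+ */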
+
+/*
+ * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
+ * inserted into
+ *
+ * Safe to call if there already is an unwritten bset - will only add a new bset
+ * if @b doesn't already have one.
+ *
+ * If this sorts bsets (i.e. invalidates iterators), iterators are
+ * re-initialized via bch2_trans_node_reinit_iter().
+ */
+void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_node_entry *bne;
+ bool reinit_iter = false;
+
+ EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
+ BUG_ON(bset_written(b, bset(b, &b->set[1])));
+ BUG_ON(btree_node_just_written(b));
+
+ if (b->nsets == MAX_BSETS &&
+ !btree_node_write_in_flight(b) &&
+ should_compact_all(c, b)) {
+ bch2_btree_node_write(c, b, SIX_LOCK_write,
+ BTREE_WRITE_init_next_bset);
+ reinit_iter = true;
+ }
+
+ if (b->nsets == MAX_BSETS &&
+ btree_node_compact(c, b))
+ reinit_iter = true;
+
+ BUG_ON(b->nsets >= MAX_BSETS);
+
+ bne = want_new_bset(c, b);
+ if (bne)
+ bch2_bset_init_next(c, b, bne);
+
+ bch2_btree_build_aux_trees(b);
+
+ if (reinit_iter)
+ bch2_trans_node_reinit_iter(trans, b);
+}
+
+static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
+ struct btree *b)
+{
+ prt_printf(out, "%s level %u/%u\n ",
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level,
+ bch2_btree_id_root(c, b->c.btree_id)->level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+}
+
+static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
+ struct bch_dev *ca,
+ struct btree *b, struct bset *i,
+ unsigned offset, int write)
+{
+ prt_printf(out, bch2_log_msg(c, "%s"),
+ write == READ
+ ? "error validating btree node "
+ : "corrupt btree node before write ");
+ if (ca)
+ prt_printf(out, "on %s ", ca->name);
+ prt_printf(out, "at btree ");
+ btree_pos_to_text(out, c, b);
+
+ prt_printf(out, "\n node offset %u", b->written);
+ if (i)
+ prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
+ prt_str(out, ": ");
+}
+
+__printf(8, 9)
+static int __btree_err(int ret,
+ struct bch_fs *c,
+ struct bch_dev *ca,
+ struct btree *b,
+ struct bset *i,
+ int write,
+ bool have_retry,
+ const char *fmt, ...)
+{
+ struct printbuf out = PRINTBUF;
+ va_list args;
+
+ btree_err_msg(&out, c, ca, b, i, b->written, write);
+
+ va_start(args, fmt);
+ prt_vprintf(&out, fmt, args);
+ va_end(args);
+
+ if (write == WRITE) {
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ ret = c->opts.errors == BCH_ON_ERROR_continue
+ ? 0
+ : -BCH_ERR_fsck_errors_not_fixed;
+ goto out;
+ }
+
+ if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
+ ret = -BCH_ERR_btree_node_read_err_fixable;
+ if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
+ ret = -BCH_ERR_btree_node_read_err_bad_node;
+
+ switch (ret) {
+ case -BCH_ERR_btree_node_read_err_fixable:
+ mustfix_fsck_err(c, "%s", out.buf);
+ ret = -BCH_ERR_fsck_fix;
+ break;
+ case -BCH_ERR_btree_node_read_err_want_retry:
+ case -BCH_ERR_btree_node_read_err_must_retry:
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ break;
+ case -BCH_ERR_btree_node_read_err_bad_node:
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ bch2_topology_error(c);
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?: -EIO;
+ break;
+ case -BCH_ERR_btree_node_read_err_incompatible:
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
+ ret = -BCH_ERR_fsck_errors_not_fixed;
+ break;
+ default:
+ BUG();
+ }
+out:
+fsck_err:
+ printbuf_exit(&out);
+ return ret;
+}
+
+#define btree_err(type, c, ca, b, i, msg, ...) \
+({ \
+ int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
+ \
+ if (_ret != -BCH_ERR_fsck_fix) { \
+ ret = _ret; \
+ goto fsck_err; \
+ } \
+ \
+ *saw_error = true; \
+})
+
+#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
+
+/*
+ * When btree topology repair changes the start or end of a node, that might
+ * mean we have to drop keys that are no longer inside the node:
+ */
+__cold
+void bch2_btree_node_drop_keys_outside_node(struct btree *b)
+{
+ struct bset_tree *t;
+
+ for_each_bset(b, t) {
+ struct bset *i = bset(b, t);
+ struct bkey_packed *k;
+
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+ if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
+ break;
+
+ if (k != i->start) {
+ unsigned shift = (u64 *) k - (u64 *) i->start;
+
+ memmove_u64s_down(i->start, k,
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
+ set_btree_bset_end(b, t);
+ }
+
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
+ if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
+ break;
+
+ if (k != vstruct_last(i)) {
+ i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
+ set_btree_bset_end(b, t);
+ }
+ }
+
+ /*
+ * Always rebuild search trees: eytzinger search tree nodes directly
+ * depend on the values of min/max key:
+ */
+ bch2_bset_set_no_aux_tree(b, b->set);
+ bch2_btree_build_aux_trees(b);
+
+ struct bkey_s_c k;
+ struct bkey unpacked;
+ struct btree_node_iter iter;
+ for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
+ BUG_ON(bpos_lt(k.k->p, b->data->min_key));
+ BUG_ON(bpos_gt(k.k->p, b->data->max_key));
+ }
+}
+
+static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
+ struct btree *b, struct bset *i,
+ unsigned offset, unsigned sectors,
+ int write, bool have_retry, bool *saw_error)
+{
+ unsigned version = le16_to_cpu(i->version);
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ int ret = 0;
+
+ btree_err_on(!bch2_version_compatible(version),
+ -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
+ "unsupported bset version %u.%u",
+ BCH_VERSION_MAJOR(version),
+ BCH_VERSION_MINOR(version));
+
+ if (btree_err_on(version < c->sb.version_min,
+ -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ "bset version %u older than superblock version_min %u",
+ version, c->sb.version_min)) {
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version_min = cpu_to_le16(version);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (btree_err_on(BCH_VERSION_MAJOR(version) >
+ BCH_VERSION_MAJOR(c->sb.version),
+ -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ "bset version %u newer than superblock version %u",
+ version, c->sb.version)) {
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version = cpu_to_le16(version);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
+ -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
+ "BSET_SEPARATE_WHITEOUTS no longer supported");
+
+ if (btree_err_on(offset + sectors > btree_sectors(c),
+ -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ "bset past end of btree node")) {
+ i->u64s = 0;
+ ret = 0;
+ goto out;
+ }
+
+ btree_err_on(offset && !i->u64s,
+ -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ "empty bset");
+
+ btree_err_on(BSET_OFFSET(i) &&
+ BSET_OFFSET(i) != offset,
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ "bset at wrong sector offset");
+
+ if (!offset) {
+ struct btree_node *bn =
+ container_of(i, struct btree_node, keys);
+ /* These indicate that we read the wrong btree node: */
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ /* XXX endianness */
+ btree_err_on(bp->seq != bn->keys.seq,
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ "incorrect sequence number (wrong btree node)");
+ }
+
+ btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+ "incorrect btree id");
+
+ btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+ "incorrect level");
+
+ if (!write)
+ compat_btree_node(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write, bn);
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ if (BTREE_PTR_RANGE_UPDATED(bp)) {
+ b->data->min_key = bp->min_key;
+ b->data->max_key = b->key.k.p;
+ }
+
+ btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ "incorrect min_key: got %s should be %s",
+ (printbuf_reset(&buf1),
+ bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
+ (printbuf_reset(&buf2),
+ bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
+ }
+
+ btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+ "incorrect max key %s",
+ (printbuf_reset(&buf1),
+ bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
+
+ if (write)
+ compat_btree_node(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write, bn);
+
+ btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
+ -BCH_ERR_btree_node_read_err_bad_node, c, ca, b, i,
+ "invalid bkey format: %s\n %s", buf1.buf,
+ (printbuf_reset(&buf2),
+ bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
+ printbuf_reset(&buf1);
+
+ compat_bformat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &bn->format);
+ }
+out:
+fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+static int bset_key_invalid(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c k,
+ bool updated_range, int rw,
+ struct printbuf *err)
+{
+ return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
+ (!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
+ (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
+}
+
+static int validate_bset_keys(struct bch_fs *c, struct btree *b,
+ struct bset *i, int write,
+ bool have_retry, bool *saw_error)
+{
+ unsigned version = le16_to_cpu(i->version);
+ struct bkey_packed *k, *prev = NULL;
+ struct printbuf buf = PRINTBUF;
+ bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
+ int ret = 0;
+
+ for (k = i->start;
+ k != vstruct_last(i);) {
+ struct bkey_s u;
+ struct bkey tmp;
+
+ if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
+ -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ "key extends past end of bset")) {
+ i->u64s = cpu_to_le16((u64 *) k - i->_data);
+ break;
+ }
+
+ if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
+ -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+ "invalid bkey format %u", k->format)) {
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+ memmove_u64s_down(k, bkey_p_next(k),
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ continue;
+ }
+
+ /* XXX: validate k->u64s */
+ if (!write)
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &b->format, k);
+
+ u = __bkey_disassemble(b, k, &tmp);
+
+ printbuf_reset(&buf);
+ if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "invalid bkey: ");
+ bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, u.s_c);
+
+ btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
+
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+ memmove_u64s_down(k, bkey_p_next(k),
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ continue;
+ }
+
+ if (write)
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
+ BSET_BIG_ENDIAN(i), write,
+ &b->format, k);
+
+ if (prev && bkey_iter_cmp(b, prev, k) > 0) {
+ struct bkey up = bkey_unpack_key(b, prev);
+
+ printbuf_reset(&buf);
+ prt_printf(&buf, "keys out of order: ");
+ bch2_bkey_to_text(&buf, &up);
+ prt_printf(&buf, " > ");
+ bch2_bkey_to_text(&buf, u.k);
+
+ bch2_dump_bset(c, b, i, 0);
+
+ if (btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf)) {
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+ memmove_u64s_down(k, bkey_p_next(k),
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ continue;
+ }
+ }
+
+ prev = k;
+ k = bkey_p_next(k);
+ }
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
+ struct btree *b, bool have_retry, bool *saw_error)
+{
+ struct btree_node_entry *bne;
+ struct sort_iter *iter;
+ struct btree_node *sorted;
+ struct bkey_packed *k;
+ struct bch_extent_ptr *ptr;
+ struct bset *i;
+ bool used_mempool, blacklisted;
+ bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
+ unsigned u64s;
+ unsigned ptr_written = btree_ptr_sectors_written(&b->key);
+ struct printbuf buf = PRINTBUF;
+ int ret = 0, retry_read = 0, write = READ;
+
+ b->version_ondisk = U16_MAX;
+ /* We might get called multiple times on read retry: */
+ b->written = 0;
+
+ iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
+ sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
+
+ if (bch2_meta_read_fault("btree"))
+ btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ "dynamic fault");
+
+ btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ "bad magic: want %llx, got %llx",
+ bset_magic(c), le64_to_cpu(b->data->magic));
+
+ btree_err_on(!b->data->keys.seq,
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ "bad btree header: seq 0");
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bch_btree_ptr_v2 *bp =
+ &bkey_i_to_btree_ptr_v2(&b->key)->v;
+
+ btree_err_on(b->data->keys.seq != bp->seq,
+ -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+ "got wrong btree node (seq %llx want %llx)",
+ b->data->keys.seq, bp->seq);
+ }
+
+ while (b->written < (ptr_written ?: btree_sectors(c))) {
+ unsigned sectors;
+ struct nonce nonce;
+ struct bch_csum csum;
+ bool first = !b->written;
+
+ if (!b->written) {
+ i = &b->data->keys;
+
+ btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ "unknown checksum type %llu",
+ BSET_CSUM_TYPE(i));
+
+ nonce = btree_nonce(i, b->written << 9);
+ csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
+
+ btree_err_on(bch2_crc_cmp(csum, b->data->csum),
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ "invalid checksum");
+
+ ret = bset_encrypt(c, i, b->written << 9);
+ if (bch2_fs_fatal_err_on(ret, c,
+ "error decrypting btree node: %i", ret))
+ goto fsck_err;
+
+ btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
+ !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
+ -BCH_ERR_btree_node_read_err_incompatible, c, NULL, b, NULL,
+ "btree node does not have NEW_EXTENT_OVERWRITE set");
+
+ sectors = vstruct_sectors(b->data, c->block_bits);
+ } else {
+ bne = write_block(b);
+ i = &bne->keys;
+
+ if (i->seq != b->data->keys.seq)
+ break;
+
+ btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ "unknown checksum type %llu",
+ BSET_CSUM_TYPE(i));
+
+ nonce = btree_nonce(i, b->written << 9);
+ csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
+
+ btree_err_on(bch2_crc_cmp(csum, bne->csum),
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+ "invalid checksum");
+
+ ret = bset_encrypt(c, i, b->written << 9);
+ if (bch2_fs_fatal_err_on(ret, c,
+ "error decrypting btree node: %i\n", ret))
+ goto fsck_err;
+
+ sectors = vstruct_sectors(bne, c->block_bits);
+ }
+
+ b->version_ondisk = min(b->version_ondisk,
+ le16_to_cpu(i->version));
+
+ ret = validate_bset(c, ca, b, i, b->written, sectors,
+ READ, have_retry, saw_error);
+ if (ret)
+ goto fsck_err;
+
+ if (!b->written)
+ btree_node_set_format(b, b->data->format);
+
+ ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
+ if (ret)
+ goto fsck_err;
+
+ SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
+
+ blacklisted = bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(i->journal_seq),
+ true);
+
+ btree_err_on(blacklisted && first,
+ -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ "first btree node bset has blacklisted journal seq (%llu)",
+ le64_to_cpu(i->journal_seq));
+
+ btree_err_on(blacklisted && ptr_written,
+ -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+ "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
+ le64_to_cpu(i->journal_seq),
+ b->written, b->written + sectors, ptr_written);
+
+ b->written += sectors;
+
+ if (blacklisted && !first)
+ continue;
+
+ sort_iter_add(iter,
+ vstruct_idx(i, 0),
+ vstruct_last(i));
+ }
+
+ if (ptr_written) {
+ btree_err_on(b->written < ptr_written,
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
+ "btree node data missing: expected %u sectors, found %u",
+ ptr_written, b->written);
+ } else {
+ for (bne = write_block(b);
+ bset_byte_offset(b, bne) < btree_bytes(c);
+ bne = (void *) bne + block_bytes(c))
+ btree_err_on(bne->keys.seq == b->data->keys.seq &&
+ !bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq),
+ true),
+ -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
+ "found bset signature after last bset");
+ }
+
+ sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
+ sorted->keys.u64s = 0;
+
+ set_btree_bset(b, b->set, &b->data->keys);
+
+ b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
+
+ u64s = le16_to_cpu(sorted->keys.u64s);
+ *sorted = *b->data;
+ sorted->keys.u64s = cpu_to_le16(u64s);
+ swap(sorted, b->data);
+ set_btree_bset(b, b->set, &b->data->keys);
+ b->nsets = 1;
+
+ BUG_ON(b->nr.live_u64s != u64s);
+
+ btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
+
+ if (updated_range)
+ bch2_btree_node_drop_keys_outside_node(b);
+
+ i = &b->data->keys;
+ for (k = i->start; k != vstruct_last(i);) {
+ struct bkey tmp;
+ struct bkey_s u = __bkey_disassemble(b, k, &tmp);
+
+ printbuf_reset(&buf);
+
+ if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
+ (bch2_inject_invalid_keys &&
+ !bversion_cmp(u.k->version, MAX_VERSION))) {
+ printbuf_reset(&buf);
+
+ prt_printf(&buf, "invalid bkey: ");
+ bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
+ prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, u.s_c);
+
+ btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
+
+ btree_keys_account_key_drop(&b->nr, 0, k);
+
+ i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
+ memmove_u64s_down(k, bkey_p_next(k),
+ (u64 *) vstruct_end(i) - (u64 *) k);
+ set_btree_bset_end(b, b->set);
+ continue;
+ }
+
+ if (u.k->type == KEY_TYPE_btree_ptr_v2) {
+ struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
+
+ bp.v->mem_ptr = 0;
+ }
+
+ k = bkey_p_next(k);
+ }
+
+ bch2_bset_build_aux_tree(b, b->set, false);
+
+ set_needs_whiteout(btree_bset_first(b), true);
+
+ btree_node_reset_sib_u64s(b);
+
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
+ struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
+
+ if (ca2->mi.state != BCH_MEMBER_STATE_rw)
+ set_btree_node_need_rewrite(b);
+ }
+
+ if (!ptr_written)
+ set_btree_node_need_rewrite(b);
+out:
+ mempool_free(iter, &c->fill_iter);
+ printbuf_exit(&buf);
+ return retry_read;
+fsck_err:
+ if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
+ ret == -BCH_ERR_btree_node_read_err_must_retry)
+ retry_read = 1;
+ else
+ set_btree_node_read_error(b);
+ goto out;
+}
+
+static void btree_node_read_work(struct work_struct *work)
+{
+ struct btree_read_bio *rb =
+ container_of(work, struct btree_read_bio, work);
+ struct bch_fs *c = rb->c;
+ struct btree *b = rb->b;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ struct bio *bio = &rb->bio;
+ struct bch_io_failures failed = { .nr = 0 };
+ struct printbuf buf = PRINTBUF;
+ bool saw_error = false;
+ bool retry = false;
+ bool can_retry;
+
+ goto start;
+ while (1) {
+ retry = true;
+ bch_info(c, "retrying read");
+ ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
+ bio->bi_iter.bi_sector = rb->pick.ptr.offset;
+ bio->bi_iter.bi_size = btree_bytes(c);
+
+ if (rb->have_ioref) {
+ bio_set_dev(bio, ca->disk_sb.bdev);
+ submit_bio_wait(bio);
+ } else {
+ bio->bi_status = BLK_STS_REMOVED;
+ }
+start:
+ printbuf_reset(&buf);
+ btree_pos_to_text(&buf, c, b);
+ bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
+ bch2_blk_status_to_str(bio->bi_status), buf.buf);
+ if (rb->have_ioref)
+ percpu_ref_put(&ca->io_ref);
+ rb->have_ioref = false;
+
+ bch2_mark_io_failure(&failed, &rb->pick);
+
+ can_retry = bch2_bkey_pick_read_device(c,
+ bkey_i_to_s_c(&b->key),
+ &failed, &rb->pick) > 0;
+
+ if (!bio->bi_status &&
+ !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
+ if (retry)
+ bch_info(c, "retry success");
+ break;
+ }
+
+ saw_error = true;
+
+ if (!can_retry) {
+ set_btree_node_read_error(b);
+ break;
+ }
+ }
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
+ rb->start_time);
+ bio_put(&rb->bio);
+
+ if (saw_error && !btree_node_read_error(b)) {
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->key.k.p);
+ bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
+ __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
+
+ bch2_btree_node_rewrite_async(c, b);
+ }
+
+ printbuf_exit(&buf);
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+}
+
+static void btree_node_read_endio(struct bio *bio)
+{
+ struct btree_read_bio *rb =
+ container_of(bio, struct btree_read_bio, bio);
+ struct bch_fs *c = rb->c;
+
+ if (rb->have_ioref) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
+ bch2_latency_acct(ca, rb->start_time, READ);
+ }
+
+ queue_work(c->io_complete_wq, &rb->work);
+}
+
+struct btree_node_read_all {
+ struct closure cl;
+ struct bch_fs *c;
+ struct btree *b;
+ unsigned nr;
+ void *buf[BCH_REPLICAS_MAX];
+ struct bio *bio[BCH_REPLICAS_MAX];
+ blk_status_t err[BCH_REPLICAS_MAX];
+};
+
+static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
+{
+ struct btree_node *bn = data;
+ struct btree_node_entry *bne;
+ unsigned offset = 0;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c))
+ return 0;
+
+ while (offset < btree_sectors(c)) {
+ if (!offset) {
+ offset += vstruct_sectors(bn, c->block_bits);
+ } else {
+ bne = data + (offset << 9);
+ if (bne->keys.seq != bn->keys.seq)
+ break;
+ offset += vstruct_sectors(bne, c->block_bits);
+ }
+ }
+
+ return offset;
+}
+
+static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
+{
+ struct btree_node *bn = data;
+ struct btree_node_entry *bne;
+
+ if (!offset)
+ return false;
+
+ while (offset < btree_sectors(c)) {
+ bne = data + (offset << 9);
+ if (bne->keys.seq == bn->keys.seq)
+ return true;
+ offset++;
+ }
+
+	return false;
+}
+
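+/*
+ * All replica reads have completed: compare each replica against the one with
+ * the most data written, flag mismatches as fixable errors, and dump a map of
+ * every replica's bsets if anything was inconsistent:
+ */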
+static void btree_node_read_all_replicas_done(struct closure *cl)
+{
+ struct btree_node_read_all *ra =
+ container_of(cl, struct btree_node_read_all, cl);
+ struct bch_fs *c = ra->c;
+ struct btree *b = ra->b;
+ struct printbuf buf = PRINTBUF;
+ bool dump_bset_maps = false;
+ bool have_retry = false;
+ int ret = 0, best = -1, write = READ;
+ unsigned i, written = 0, written2 = 0;
+ __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
+ ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
+ bool _saw_error = false, *saw_error = &_saw_error;
+
+ for (i = 0; i < ra->nr; i++) {
+ struct btree_node *bn = ra->buf[i];
+
+ if (ra->err[i])
+ continue;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c) ||
+ (seq && seq != bn->keys.seq))
+ continue;
+
+ if (best < 0) {
+ best = i;
+ written = btree_node_sectors_written(c, bn);
+ continue;
+ }
+
+ written2 = btree_node_sectors_written(c, ra->buf[i]);
+ if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+ "btree node sectors written mismatch: %u != %u",
+ written, written2) ||
+ btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
+ -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+ "found bset signature after last bset") ||
+ btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
+ -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+ "btree node replicas content mismatch"))
+ dump_bset_maps = true;
+
+ if (written2 > written) {
+ written = written2;
+ best = i;
+ }
+ }
+fsck_err:
+ if (dump_bset_maps) {
+ for (i = 0; i < ra->nr; i++) {
+ struct btree_node *bn = ra->buf[i];
+ struct btree_node_entry *bne = NULL;
+ unsigned offset = 0, sectors;
+ bool gap = false;
+
+ if (ra->err[i])
+ continue;
+
+ printbuf_reset(&buf);
+
+ while (offset < btree_sectors(c)) {
+ if (!offset) {
+ sectors = vstruct_sectors(bn, c->block_bits);
+ } else {
+ bne = ra->buf[i] + (offset << 9);
+ if (bne->keys.seq != bn->keys.seq)
+ break;
+ sectors = vstruct_sectors(bne, c->block_bits);
+ }
+
+ prt_printf(&buf, " %u-%u", offset, offset + sectors);
+ if (bne && bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq), false))
+ prt_printf(&buf, "*");
+ offset += sectors;
+ }
+
+ while (offset < btree_sectors(c)) {
+ bne = ra->buf[i] + (offset << 9);
+ if (bne->keys.seq == bn->keys.seq) {
+ if (!gap)
+ prt_printf(&buf, " GAP");
+ gap = true;
+
+ sectors = vstruct_sectors(bne, c->block_bits);
+ prt_printf(&buf, " %u-%u", offset, offset + sectors);
+ if (bch2_journal_seq_is_blacklisted(c,
+ le64_to_cpu(bne->keys.journal_seq), false))
+ prt_printf(&buf, "*");
+ }
+ offset++;
+ }
+
+ bch_err(c, "replica %u:%s", i, buf.buf);
+ }
+ }
+
+ if (best >= 0) {
+ memcpy(b->data, ra->buf[best], btree_bytes(c));
+ ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
+ } else {
+ ret = -1;
+ }
+
+ if (ret)
+ set_btree_node_read_error(b);
+ else if (*saw_error)
+ bch2_btree_node_rewrite_async(c, b);
+
+ for (i = 0; i < ra->nr; i++) {
+ mempool_free(ra->buf[i], &c->btree_bounce_pool);
+ bio_put(ra->bio[i]);
+ }
+
+ closure_debug_destroy(&ra->cl);
+ kfree(ra);
+ printbuf_exit(&buf);
+
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+}
+
+static void btree_node_read_all_replicas_endio(struct bio *bio)
+{
+ struct btree_read_bio *rb =
+ container_of(bio, struct btree_read_bio, bio);
+ struct bch_fs *c = rb->c;
+ struct btree_node_read_all *ra = rb->ra;
+
+ if (rb->have_ioref) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+
+ bch2_latency_acct(ca, rb->start_time, READ);
+ }
+
+ ra->err[rb->idx] = bio->bi_status;
+ closure_put(&ra->cl);
+}
+
+/*
+ * XXX This allocates multiple times from the same mempools, and can deadlock
+ * under sufficient memory pressure (but is only a debug path)
+ */
+static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
+{
+ struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded pick;
+ struct btree_node_read_all *ra;
+ unsigned i;
+
+ ra = kzalloc(sizeof(*ra), GFP_NOFS);
+ if (!ra)
+ return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
+
+ closure_init(&ra->cl, NULL);
+ ra->c = c;
+ ra->b = b;
+ ra->nr = bch2_bkey_nr_ptrs(k);
+
+ for (i = 0; i < ra->nr; i++) {
+ ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
+ ra->bio[i] = bio_alloc_bioset(NULL,
+ buf_pages(ra->buf[i], btree_bytes(c)),
+ REQ_OP_READ|REQ_SYNC|REQ_META,
+ GFP_NOFS,
+ &c->btree_bio);
+ }
+
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ struct btree_read_bio *rb =
+ container_of(ra->bio[i], struct btree_read_bio, bio);
+ rb->c = c;
+ rb->b = b;
+ rb->ra = ra;
+ rb->start_time = local_clock();
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->idx = i;
+ rb->pick = pick;
+ rb->bio.bi_iter.bi_sector = pick.ptr.offset;
+ rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
+ bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
+
+ if (rb->have_ioref) {
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
+ bio_sectors(&rb->bio));
+ bio_set_dev(&rb->bio, ca->disk_sb.bdev);
+
+ closure_get(&ra->cl);
+ submit_bio(&rb->bio);
+ } else {
+ ra->err[i] = BLK_STS_REMOVED;
+ }
+
+ i++;
+ }
+
+ if (sync) {
+ closure_sync(&ra->cl);
+ btree_node_read_all_replicas_done(&ra->cl);
+ } else {
+ continue_at(&ra->cl, btree_node_read_all_replicas_done,
+ c->io_complete_wq);
+ }
+
+ return 0;
+}
+
+void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
+ bool sync)
+{
+ struct extent_ptr_decoded pick;
+ struct btree_read_bio *rb;
+ struct bch_dev *ca;
+ struct bio *bio;
+ int ret;
+
+ trace_and_count(c, btree_node_read, c, b);
+
+ if (bch2_verify_all_btree_replicas &&
+ !btree_node_read_all_replicas(c, b, sync))
+ return;
+
+ ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
+ NULL, &pick);
+
+ if (ret <= 0) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "btree node read error: no device to read from\n at ");
+ btree_pos_to_text(&buf, c, b);
+ bch_err(c, "%s", buf.buf);
+
+ if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
+ bch2_fatal_error(c);
+
+ set_btree_node_read_error(b);
+ clear_btree_node_read_in_flight(b);
+ wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
+ printbuf_exit(&buf);
+ return;
+ }
+
+ ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+
+ bio = bio_alloc_bioset(NULL,
+ buf_pages(b->data, btree_bytes(c)),
+ REQ_OP_READ|REQ_SYNC|REQ_META,
+ GFP_NOFS,
+ &c->btree_bio);
+ rb = container_of(bio, struct btree_read_bio, bio);
+ rb->c = c;
+ rb->b = b;
+ rb->ra = NULL;
+ rb->start_time = local_clock();
+ rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->pick = pick;
+ INIT_WORK(&rb->work, btree_node_read_work);
+ bio->bi_iter.bi_sector = pick.ptr.offset;
+ bio->bi_end_io = btree_node_read_endio;
+ bch2_bio_map(bio, b->data, btree_bytes(c));
+
+ if (rb->have_ioref) {
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
+ bio_sectors(bio));
+ bio_set_dev(bio, ca->disk_sb.bdev);
+
+ if (sync) {
+ submit_bio_wait(bio);
+
+ btree_node_read_work(&rb->work);
+ } else {
+ submit_bio(bio);
+ }
+ } else {
+ bio->bi_status = BLK_STS_REMOVED;
+
+ if (sync)
+ btree_node_read_work(&rb->work);
+ else
+ queue_work(c->io_complete_wq, &rb->work);
+ }
+}
+
+static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
+ const struct bkey_i *k, unsigned level)
+{
+ struct bch_fs *c = trans->c;
+ struct closure cl;
+ struct btree *b;
+ int ret;
+
+ closure_init_stack(&cl);
+
+ do {
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+ closure_sync(&cl);
+ } while (ret);
+
+ b = bch2_btree_node_mem_alloc(trans, level != 0);
+ bch2_btree_cache_cannibalize_unlock(c);
+
+ BUG_ON(IS_ERR(b));
+
+ bkey_copy(&b->key, k);
+ BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
+
+ set_btree_node_read_in_flight(b);
+
+ bch2_btree_node_read(c, b, true);
+
+ if (btree_node_read_error(b)) {
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ mutex_lock(&c->btree_cache.lock);
+ list_move(&b->list, &c->btree_cache.freeable);
+ mutex_unlock(&c->btree_cache.lock);
+
+ ret = -EIO;
+ goto err;
+ }
+
+ bch2_btree_set_root_for_read(c, b);
+err:
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+
+ return ret;
+}
+
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
+ const struct bkey_i *k, unsigned level)
+{
+ return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
+}
+
+void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
+ struct btree_write *w)
+{
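+	/*
+	 * will_make_reachable points to the btree_update that will make this
+	 * node reachable, with the low bit set while a ref is still held on
+	 * that update's closure - clear the bit and drop the ref:
+	 */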
+ unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
+
+ do {
+ old = new = v;
+ if (!(old & 1))
+ break;
+
+ new &= ~1UL;
+ } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
+
+ if (old & 1)
+ closure_put(&((struct btree_update *) new)->cl);
+
+ bch2_journal_pin_drop(&c->journal, &w->journal);
+}
+
+static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
+{
+ struct btree_write *w = btree_prev_write(b);
+ unsigned long old, new, v;
+ unsigned type = 0;
+
+ bch2_btree_complete_write(c, b, w);
+
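+	/*
+	 * If the node was redirtied and another write is needed (and nothing
+	 * blocks it), start that write immediately; otherwise clear the
+	 * write_in_flight flags and wake up waiters:
+	 */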
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
+
+ if ((old & (1U << BTREE_NODE_dirty)) &&
+ (old & (1U << BTREE_NODE_need_write)) &&
+ !(old & (1U << BTREE_NODE_never_write)) &&
+ !(old & (1U << BTREE_NODE_write_blocked)) &&
+ !(old & (1U << BTREE_NODE_will_make_reachable))) {
+ new &= ~(1U << BTREE_NODE_dirty);
+ new &= ~(1U << BTREE_NODE_need_write);
+ new |= (1U << BTREE_NODE_write_in_flight);
+ new |= (1U << BTREE_NODE_write_in_flight_inner);
+ new |= (1U << BTREE_NODE_just_written);
+ new ^= (1U << BTREE_NODE_write_idx);
+
+ type = new & BTREE_WRITE_TYPE_MASK;
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ } else {
+ new &= ~(1U << BTREE_NODE_write_in_flight);
+ new &= ~(1U << BTREE_NODE_write_in_flight_inner);
+ }
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+ if (new & (1U << BTREE_NODE_write_in_flight))
+ __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
+ else
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+}
+
+static void btree_node_write_done(struct bch_fs *c, struct btree *b)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ __btree_node_write_done(c, b);
+ six_unlock_read(&b->c.lock);
+
+ bch2_trans_put(trans);
+}
+
+static void btree_node_write_work(struct work_struct *work)
+{
+ struct btree_write_bio *wbio =
+ container_of(work, struct btree_write_bio, work);
+ struct bch_fs *c = wbio->wbio.c;
+ struct btree *b = wbio->wbio.bio.bi_private;
+ struct bch_extent_ptr *ptr;
+ int ret = 0;
+
+ btree_bounce_free(c,
+ wbio->data_bytes,
+ wbio->wbio.used_mempool,
+ wbio->data);
+
+ bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
+ bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
+
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
+ goto err;
+
+	/*
+	 * The first btree write doesn't need to update the key in the parent
+	 * node - the node is made reachable by the pending btree_update:
+	 */
+	if (!wbio->wbio.first_btree_write) {
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
+ BCH_WATERMARK_reclaim|
+ BTREE_INSERT_JOURNAL_RECLAIM|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_NOCHECK_RW,
+ !wbio->wbio.failed.nr));
+ if (ret)
+ goto err;
+ }
+out:
+ bio_put(&wbio->wbio.bio);
+ btree_node_write_done(c, b);
+ return;
+err:
+ set_btree_node_noevict(b);
+ if (!bch2_err_matches(ret, EROFS))
+ bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
+ goto out;
+}
+
+static void btree_node_write_endio(struct bio *bio)
+{
+ struct bch_write_bio *wbio = to_wbio(bio);
+ struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
+ struct bch_write_bio *orig = parent ?: wbio;
+ struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
+ struct bch_fs *c = wbio->c;
+ struct btree *b = wbio->bio.bi_private;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+ unsigned long flags;
+
+ if (wbio->have_ioref)
+ bch2_latency_acct(ca, wbio->submit_time, WRITE);
+
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
+ bch2_blk_status_to_str(bio->bi_status)) ||
+ bch2_meta_write_fault("btree")) {
+ spin_lock_irqsave(&c->btree_write_error_lock, flags);
+ bch2_dev_list_add_dev(&orig->failed, wbio->dev);
+ spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+ }
+
+ if (wbio->have_ioref)
+ percpu_ref_put(&ca->io_ref);
+
+ if (parent) {
+ bio_put(bio);
+ bio_endio(&parent->bio);
+ return;
+ }
+
+ clear_btree_node_write_in_flight_inner(b);
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
+ INIT_WORK(&wb->work, btree_node_write_work);
+ queue_work(c->btree_io_complete_wq, &wb->work);
+}
+
+static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned sectors)
+{
+ struct printbuf buf = PRINTBUF;
+ bool saw_error;
+ int ret;
+
+ ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
+ BKEY_TYPE_btree, WRITE, &buf);
+
+ if (ret)
+ bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
+ printbuf_exit(&buf);
+ if (ret)
+ return ret;
+
+ ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
+ validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
+ if (ret) {
+ bch2_inconsistent_error(c);
+ dump_stack();
+ }
+
+ return ret;
+}
+
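+/*
+ * The key's pointers point at the start of the node on disk; this write may be
+ * for a bset partway into the node, so offset each pointer by the number of
+ * sectors already written:
+ */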
+static void btree_write_submit(struct work_struct *work)
+{
+ struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
+ struct bch_extent_ptr *ptr;
+ BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+
+ bkey_copy(&tmp.k, &wbio->key);
+
+ bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
+ ptr->offset += wbio->sector_offset;
+
+ bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+ &tmp.k, false);
+}
+
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
+{
+ struct btree_write_bio *wbio;
+ struct bset_tree *t;
+ struct bset *i;
+ struct btree_node *bn = NULL;
+ struct btree_node_entry *bne = NULL;
+ struct sort_iter_stack sort_iter;
+ struct nonce nonce;
+ unsigned bytes_to_write, sectors_to_write, bytes, u64s;
+ u64 seq = 0;
+ bool used_mempool;
+ unsigned long old, new;
+ bool validate_before_checksum = false;
+ enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
+ void *data;
+ int ret;
+
+ if (flags & BTREE_WRITE_ALREADY_STARTED)
+ goto do_write;
+
+ /*
+ * We may only have a read lock on the btree node - the dirty bit is our
+ * "lock" against racing with other threads that may be trying to start
+ * a write, we do a write iff we clear the dirty bit. Since setting the
+ * dirty bit requires a write lock, we can't race with other threads
+ * redirtying it:
+ */
+ do {
+ old = new = READ_ONCE(b->flags);
+
+ if (!(old & (1 << BTREE_NODE_dirty)))
+ return;
+
+ if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
+ !(old & (1 << BTREE_NODE_need_write)))
+ return;
+
+ if (old &
+ ((1 << BTREE_NODE_never_write)|
+ (1 << BTREE_NODE_write_blocked)))
+ return;
+
+ if (b->written &&
+ (old & (1 << BTREE_NODE_will_make_reachable)))
+ return;
+
+ if (old & (1 << BTREE_NODE_write_in_flight))
+ return;
+
+ if (flags & BTREE_WRITE_ONLY_IF_NEED)
+ type = new & BTREE_WRITE_TYPE_MASK;
+ new &= ~BTREE_WRITE_TYPE_MASK;
+
+ new &= ~(1 << BTREE_NODE_dirty);
+ new &= ~(1 << BTREE_NODE_need_write);
+ new |= (1 << BTREE_NODE_write_in_flight);
+ new |= (1 << BTREE_NODE_write_in_flight_inner);
+ new |= (1 << BTREE_NODE_just_written);
+ new ^= (1 << BTREE_NODE_write_idx);
+ } while (cmpxchg_acquire(&b->flags, old, new) != old);
+
+ if (new & (1U << BTREE_NODE_need_write))
+ return;
+do_write:
+ BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
+
+ atomic_dec(&c->btree_cache.dirty);
+
+ BUG_ON(btree_node_fake(b));
+ BUG_ON((b->will_make_reachable != 0) != !b->written);
+
+ BUG_ON(b->written >= btree_sectors(c));
+ BUG_ON(b->written & (block_sectors(c) - 1));
+ BUG_ON(bset_written(b, btree_bset_last(b)));
+ BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
+ BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
+
+ bch2_sort_whiteouts(c, b);
+
+ sort_iter_stack_init(&sort_iter, b);
+
+ bytes = !b->written
+ ? sizeof(struct btree_node)
+ : sizeof(struct btree_node_entry);
+
+ bytes += b->whiteout_u64s * sizeof(u64);
+
+ for_each_bset(b, t) {
+ i = bset(b, t);
+
+ if (bset_written(b, i))
+ continue;
+
+ bytes += le16_to_cpu(i->u64s) * sizeof(u64);
+ sort_iter_add(&sort_iter.iter,
+ btree_bkey_first(b, t),
+ btree_bkey_last(b, t));
+ seq = max(seq, le64_to_cpu(i->journal_seq));
+ }
+
+ BUG_ON(b->written && !seq);
+
+ /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
+ bytes += 8;
+
+ /* buffer must be a multiple of the block size */
+ bytes = round_up(bytes, block_bytes(c));
+
+ data = btree_bounce_alloc(c, bytes, &used_mempool);
+
+ if (!b->written) {
+ bn = data;
+ *bn = *b->data;
+ i = &bn->keys;
+ } else {
+ bne = data;
+ bne->keys = b->data->keys;
+ i = &bne->keys;
+ }
+
+ i->journal_seq = cpu_to_le64(seq);
+ i->u64s = 0;
+
+ sort_iter_add(&sort_iter.iter,
+ unwritten_whiteouts_start(c, b),
+ unwritten_whiteouts_end(c, b));
+ SET_BSET_SEPARATE_WHITEOUTS(i, false);
+
+ b->whiteout_u64s = 0;
+
+ u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
+ le16_add_cpu(&i->u64s, u64s);
+
+ BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
+
+ set_needs_whiteout(i, false);
+
+ /* do we have data to write? */
+ if (b->written && !i->u64s)
+ goto nowrite;
+
+ bytes_to_write = vstruct_end(i) - data;
+ sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
+
+ if (!b->written &&
+ b->key.k.type == KEY_TYPE_btree_ptr_v2)
+ BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
+
+ memset(data + bytes_to_write, 0,
+ (sectors_to_write << 9) - bytes_to_write);
+
+ BUG_ON(b->written + sectors_to_write > btree_sectors(c));
+ BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
+ BUG_ON(i->seq != b->data->keys.seq);
+
+ i->version = cpu_to_le16(c->sb.version);
+ SET_BSET_OFFSET(i, b->written);
+ SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
+
+ if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
+ validate_before_checksum = true;
+
+ /* validate_bset will be modifying: */
+ if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
+ validate_before_checksum = true;
+
+ /* if we're going to be encrypting, check metadata validity first: */
+ if (validate_before_checksum &&
+ validate_bset_for_write(c, b, i, sectors_to_write))
+ goto err;
+
+ ret = bset_encrypt(c, i, b->written << 9);
+ if (bch2_fs_fatal_err_on(ret, c,
+ "error encrypting btree node: %i\n", ret))
+ goto err;
+
+ nonce = btree_nonce(i, b->written << 9);
+
+ if (bn)
+ bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
+ else
+ bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
+
+ /* if we're not encrypting, check metadata after checksumming: */
+ if (!validate_before_checksum &&
+ validate_bset_for_write(c, b, i, sectors_to_write))
+ goto err;
+
+ /*
+ * We handle btree write errors by immediately halting the journal -
+ * after we've done that, we can't issue any subsequent btree writes
+ * because they might have pointers to new nodes that failed to write.
+ *
+ * Furthermore, there's no point in doing any more btree writes because
+ * with the journal stopped, we're never going to update the journal to
+ * reflect that those writes were done and the data flushed from the
+ * journal:
+ *
+ * Also on journal error, the pending write may have updates that were
+ * never journalled (interior nodes, see btree_update_nodes_written()) -
+ * it's critical that we don't do the write in that case otherwise we
+ * will have updates visible that weren't in the journal:
+ *
+ * Make sure to update b->written so bch2_btree_init_next() doesn't
+ * break:
+ */
+ if (bch2_journal_error(&c->journal) ||
+ c->opts.nochanges)
+ goto err;
+
+ trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
+
+ wbio = container_of(bio_alloc_bioset(NULL,
+ buf_pages(data, sectors_to_write << 9),
+ REQ_OP_WRITE|REQ_META,
+ GFP_NOFS,
+ &c->btree_bio),
+ struct btree_write_bio, wbio.bio);
+ wbio_init(&wbio->wbio.bio);
+ wbio->data = data;
+ wbio->data_bytes = bytes;
+ wbio->sector_offset = b->written;
+ wbio->wbio.c = c;
+ wbio->wbio.used_mempool = used_mempool;
+ wbio->wbio.first_btree_write = !b->written;
+ wbio->wbio.bio.bi_end_io = btree_node_write_endio;
+ wbio->wbio.bio.bi_private = b;
+
+ bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
+
+ bkey_copy(&wbio->key, &b->key);
+
+ b->written += sectors_to_write;
+
+ if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
+ cpu_to_le16(b->written);
+
+ atomic64_inc(&c->btree_write_stats[type].nr);
+ atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
+
+ INIT_WORK(&wbio->work, btree_write_submit);
+ queue_work(c->io_complete_wq, &wbio->work);
+ return;
+err:
+ set_btree_node_noevict(b);
+ b->written += sectors_to_write;
+nowrite:
+ btree_bounce_free(c, bytes, used_mempool, data);
+ __btree_node_write_done(c, b);
+}
+
+/*
+ * Work that must be done with write lock held:
+ */
+bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
+{
+ bool invalidated_iter = false;
+ struct btree_node_entry *bne;
+ struct bset_tree *t;
+
+ if (!btree_node_just_written(b))
+ return false;
+
+ BUG_ON(b->whiteout_u64s);
+
+ clear_btree_node_just_written(b);
+
+ /*
+ * Note: immediately after write, bset_written() doesn't work - the
+ * amount of data we had to write after compaction might have been
+ * smaller than the offset of the last bset.
+ *
+ * However, we know that all bsets have been written here, as long as
+ * we're still holding the write lock:
+ */
+
+ /*
+ * XXX: decide if we really want to unconditionally sort down to a
+ * single bset:
+ */
+ if (b->nsets > 1) {
+ btree_node_sort(c, b, 0, b->nsets, true);
+ invalidated_iter = true;
+ } else {
+ invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
+ }
+
+ for_each_bset(b, t)
+ set_needs_whiteout(bset(b, t), true);
+
+ bch2_btree_verify(c, b);
+
+ /*
+ * If later we don't unconditionally sort down to a single bset, we have
+ * to ensure this is still true:
+ */
+ BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
+
+ bne = want_new_bset(c, b);
+ if (bne)
+ bch2_bset_init_next(c, b, bne);
+
+ bch2_btree_build_aux_trees(b);
+
+ return invalidated_iter;
+}
+
+/*
+ * Use this one if the node is intent locked:
+ */
+void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
+ enum six_lock_type lock_type_held,
+ unsigned flags)
+{
+ if (lock_type_held == SIX_LOCK_intent ||
+ (lock_type_held == SIX_LOCK_read &&
+ six_lock_tryupgrade(&b->c.lock))) {
+ __bch2_btree_node_write(c, b, flags);
+
+ /* don't cycle lock unnecessarily: */
+ if (btree_node_just_written(b) &&
+ six_trylock_write(&b->c.lock)) {
+ bch2_btree_post_write_cleanup(c, b);
+ six_unlock_write(&b->c.lock);
+ }
+
+ if (lock_type_held == SIX_LOCK_read)
+ six_lock_downgrade(&b->c.lock);
+ } else {
+ __bch2_btree_node_write(c, b, flags);
+ if (lock_type_held == SIX_LOCK_write &&
+ btree_node_just_written(b))
+ bch2_btree_post_write_cleanup(c, b);
+ }
+}
+
+static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
+{
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct btree *b;
+ unsigned i;
+ bool ret = false;
+restart:
+ rcu_read_lock();
+ for_each_cached_btree(b, c, tbl, i, pos)
+ if (test_bit(flag, &b->flags)) {
+ rcu_read_unlock();
+ wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
+ ret = true;
+ goto restart;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+bool bch2_btree_flush_all_reads(struct bch_fs *c)
+{
+ return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
+}
+
+bool bch2_btree_flush_all_writes(struct bch_fs *c)
+{
+ return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
+}
+
+static const char * const bch2_btree_write_types[] = {
+#define x(t, n) [n] = #t,
+ BCH_BTREE_WRITE_TYPES()
+ NULL
+};
+
+void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ printbuf_tabstop_push(out, 20);
+ printbuf_tabstop_push(out, 10);
+
+ prt_tab(out);
+ prt_str(out, "nr");
+ prt_tab(out);
+ prt_str(out, "size");
+ prt_newline(out);
+
+ for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+ u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
+ u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
+
+ prt_printf(out, "%s:", bch2_btree_write_types[i]);
+ prt_tab(out);
+ prt_u64(out, nr);
+ prt_tab(out);
+ prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
+ prt_newline(out);
+ }
+}
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
new file mode 100644
index 000000000000..7e03dd76fb38
--- /dev/null
+++ b/fs/bcachefs/btree_io.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_IO_H
+#define _BCACHEFS_BTREE_IO_H
+
+#include "bkey_methods.h"
+#include "bset.h"
+#include "btree_locking.h"
+#include "checksum.h"
+#include "extents.h"
+#include "io_write_types.h"
+
+struct bch_fs;
+struct btree_write;
+struct btree;
+struct btree_iter;
+struct btree_node_read_all;
+
+static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
+{
+ if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
+ atomic_inc(&c->btree_cache.dirty);
+}
+
+static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
+{
+ if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
+ atomic_dec(&c->btree_cache.dirty);
+}
+
+static inline unsigned btree_ptr_sectors_written(struct bkey_i *k)
+{
+ return k->k.type == KEY_TYPE_btree_ptr_v2
+ ? le16_to_cpu(bkey_i_to_btree_ptr_v2(k)->v.sectors_written)
+ : 0;
+}
+
+struct btree_read_bio {
+ struct bch_fs *c;
+ struct btree *b;
+ struct btree_node_read_all *ra;
+ u64 start_time;
+ unsigned have_ioref:1;
+ unsigned idx:7;
+ struct extent_ptr_decoded pick;
+ struct work_struct work;
+ struct bio bio;
+};
+
+struct btree_write_bio {
+ struct work_struct work;
+ __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+ void *data;
+ unsigned data_bytes;
+ unsigned sector_offset;
+ struct bch_write_bio wbio;
+};
+
+void bch2_btree_node_io_unlock(struct btree *);
+void bch2_btree_node_io_lock(struct btree *);
+void __bch2_btree_node_wait_on_read(struct btree *);
+void __bch2_btree_node_wait_on_write(struct btree *);
+void bch2_btree_node_wait_on_read(struct btree *);
+void bch2_btree_node_wait_on_write(struct btree *);
+
+enum compact_mode {
+ COMPACT_LAZY,
+ COMPACT_ALL,
+};
+
+bool bch2_compact_whiteouts(struct bch_fs *, struct btree *,
+ enum compact_mode);
+
+static inline bool should_compact_bset_lazy(struct btree *b,
+ struct bset_tree *t)
+{
+ unsigned total_u64s = bset_u64s(t);
+ unsigned dead_u64s = bset_dead_u64s(b, t);
+
+ return dead_u64s > 64 && dead_u64s * 3 > total_u64s;
+}
+
+static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
+{
+ struct bset_tree *t;
+
+ for_each_bset(b, t)
+ if (should_compact_bset_lazy(b, t))
+ return bch2_compact_whiteouts(c, b, COMPACT_LAZY);
+
+ return false;
+}
+
+static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
+{
+ return (struct nonce) {{
+ [0] = cpu_to_le32(offset),
+ [1] = ((__le32 *) &i->seq)[0],
+ [2] = ((__le32 *) &i->seq)[1],
+ [3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
+ }};
+}
+
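+/*
+ * Encrypt or decrypt a bset in place (bch2_encrypt is used for both
+ * directions); for the first bset in a node, part of the btree node header is
+ * included as well:
+ */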
+static inline int bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
+{
+ struct nonce nonce = btree_nonce(i, offset);
+ int ret;
+
+ if (!offset) {
+ struct btree_node *bn = container_of(i, struct btree_node, keys);
+ unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
+
+ ret = bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
+ &bn->flags, bytes);
+ if (ret)
+ return ret;
+
+ nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
+ }
+
+ return bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
+ vstruct_end(i) - (void *) i->_data);
+}
+
+void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
+
+void bch2_btree_node_drop_keys_outside_node(struct btree *);
+
+void bch2_btree_build_aux_trees(struct btree *);
+void bch2_btree_init_next(struct btree_trans *, struct btree *);
+
+int bch2_btree_node_read_done(struct bch_fs *, struct bch_dev *,
+ struct btree *, bool, bool *);
+void bch2_btree_node_read(struct bch_fs *, struct btree *, bool);
+int bch2_btree_root_read(struct bch_fs *, enum btree_id,
+ const struct bkey_i *, unsigned);
+
+void bch2_btree_complete_write(struct bch_fs *, struct btree *,
+ struct btree_write *);
+
+bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
+
+enum btree_write_flags {
+ __BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
+ __BTREE_WRITE_ALREADY_STARTED,
+};
+#define BTREE_WRITE_ONLY_IF_NEED BIT(__BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED BIT(__BTREE_WRITE_ALREADY_STARTED)
+
+void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
+void bch2_btree_node_write(struct bch_fs *, struct btree *,
+ enum six_lock_type, unsigned);
+
+static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
+ enum six_lock_type lock_held)
+{
+ bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED);
+}
+
+bool bch2_btree_flush_all_reads(struct bch_fs *);
+bool bch2_btree_flush_all_writes(struct bch_fs *);
+
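+/*
+ * Compatibility helpers for btree nodes from before the current metadata
+ * version: convert key formats and positions to and from the old on-disk
+ * layout:
+ */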
+static inline void compat_bformat(unsigned level, enum btree_id btree_id,
+ unsigned version, unsigned big_endian,
+ int write, struct bkey_format *f)
+{
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id == BTREE_ID_inodes) {
+ swap(f->bits_per_field[BKEY_FIELD_INODE],
+ f->bits_per_field[BKEY_FIELD_OFFSET]);
+ swap(f->field_offset[BKEY_FIELD_INODE],
+ f->field_offset[BKEY_FIELD_OFFSET]);
+ }
+
+ if (version < bcachefs_metadata_version_snapshot &&
+ (level || btree_type_has_snapshots(btree_id))) {
+ u64 max_packed =
+ ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]);
+
+ f->field_offset[BKEY_FIELD_SNAPSHOT] = write
+ ? 0
+ : cpu_to_le64(U32_MAX - max_packed);
+ }
+}
+
+static inline void compat_bpos(unsigned level, enum btree_id btree_id,
+ unsigned version, unsigned big_endian,
+ int write, struct bpos *p)
+{
+ if (big_endian != CPU_BIG_ENDIAN)
+ bch2_bpos_swab(p);
+
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id == BTREE_ID_inodes)
+ swap(p->inode, p->offset);
+}
+
+static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
+ unsigned version, unsigned big_endian,
+ int write,
+ struct btree_node *bn)
+{
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id_is_extents(btree_id) &&
+ !bpos_eq(bn->min_key, POS_MIN) &&
+ write)
+ bn->min_key = bpos_nosnap_predecessor(bn->min_key);
+
+ if (version < bcachefs_metadata_version_snapshot &&
+ write)
+ bn->max_key.snapshot = 0;
+
+ compat_bpos(level, btree_id, version, big_endian, write, &bn->min_key);
+ compat_bpos(level, btree_id, version, big_endian, write, &bn->max_key);
+
+ if (version < bcachefs_metadata_version_snapshot &&
+ !write)
+ bn->max_key.snapshot = U32_MAX;
+
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id_is_extents(btree_id) &&
+ !bpos_eq(bn->min_key, POS_MIN) &&
+ !write)
+ bn->min_key = bpos_nosnap_successor(bn->min_key);
+}
+
+void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);
+
+#endif /* _BCACHEFS_BTREE_IO_H */
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
new file mode 100644
index 000000000000..1d79514754d7
--- /dev/null
+++ b/fs/bcachefs/btree_iter.c
@@ -0,0 +1,3215 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "bkey_buf.h"
+#include "btree_cache.h"
+#include "btree_iter.h"
+#include "btree_journal_iter.h"
+#include "btree_key_cache.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "debug.h"
+#include "error.h"
+#include "extents.h"
+#include "journal.h"
+#include "replicas.h"
+#include "snapshot.h"
+#include "trace.h"
+
+#include <linux/random.h>
+#include <linux/prefetch.h>
+
+static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
+static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
+ struct btree_path *);
+
+static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
+{
+#ifdef TRACK_PATH_ALLOCATED
+ return iter->ip_allocated;
+#else
+ return 0;
+#endif
+}
+
+static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
+
+static inline int __btree_path_cmp(const struct btree_path *l,
+ enum btree_id r_btree_id,
+ bool r_cached,
+ struct bpos r_pos,
+ unsigned r_level)
+{
+ /*
+ * Must match lock ordering as defined by __bch2_btree_node_lock:
+ */
+ return cmp_int(l->btree_id, r_btree_id) ?:
+ cmp_int((int) l->cached, (int) r_cached) ?:
+ bpos_cmp(l->pos, r_pos) ?:
+ -cmp_int(l->level, r_level);
+}
+
+static inline int btree_path_cmp(const struct btree_path *l,
+ const struct btree_path *r)
+{
+ return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
+}
+
+static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
+{
+ /* Are we iterating over keys in all snapshots? */
+ if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+ p = bpos_successor(p);
+ } else {
+ p = bpos_nosnap_successor(p);
+ p.snapshot = iter->snapshot;
+ }
+
+ return p;
+}
+
+static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
+{
+ /* Are we iterating over keys in all snapshots? */
+ if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+ p = bpos_predecessor(p);
+ } else {
+ p = bpos_nosnap_predecessor(p);
+ p.snapshot = iter->snapshot;
+ }
+
+ return p;
+}
+
+static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
+{
+ struct bpos pos = iter->pos;
+
+ if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ !bkey_eq(pos, POS_MAX))
+ pos = bkey_successor(iter, pos);
+ return pos;
+}
+
+static inline bool btree_path_pos_before_node(struct btree_path *path,
+ struct btree *b)
+{
+ return bpos_lt(path->pos, b->data->min_key);
+}
+
+static inline bool btree_path_pos_after_node(struct btree_path *path,
+ struct btree *b)
+{
+ return bpos_gt(path->pos, b->key.k.p);
+}
+
+static inline bool btree_path_pos_in_node(struct btree_path *path,
+ struct btree *b)
+{
+ return path->btree_id == b->c.btree_id &&
+ !btree_path_pos_before_node(path, b) &&
+ !btree_path_pos_after_node(path, b);
+}
+
+/* Btree iterator: */
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+static void bch2_btree_path_verify_cached(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ struct bkey_cached *ck;
+ bool locked = btree_node_locked(path, 0);
+
+ if (!bch2_btree_node_relock(trans, path, 0))
+ return;
+
+ ck = (void *) path->l[0].b;
+ BUG_ON(ck->key.btree_id != path->btree_id ||
+ !bkey_eq(ck->key.pos, path->pos));
+
+ if (!locked)
+ btree_node_unlock(trans, path, 0);
+}
+
+static void bch2_btree_path_verify_level(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ struct btree_path_level *l;
+ struct btree_node_iter tmp;
+ bool locked;
+ struct bkey_packed *p, *k;
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ struct printbuf buf3 = PRINTBUF;
+ const char *msg;
+
+ if (!bch2_debug_check_iterators)
+ return;
+
+ l = &path->l[level];
+ tmp = l->iter;
+ locked = btree_node_locked(path, level);
+
+ if (path->cached) {
+ if (!level)
+ bch2_btree_path_verify_cached(trans, path);
+ return;
+ }
+
+ if (!btree_path_node(path, level))
+ return;
+
+ if (!bch2_btree_node_relock_notrace(trans, path, level))
+ return;
+
+ BUG_ON(!btree_path_pos_in_node(path, l->b));
+
+ bch2_btree_node_iter_verify(&l->iter, l->b);
+
+ /*
+ * For interior nodes, the iterator will have skipped past deleted keys:
+ */
+ p = level
+ ? bch2_btree_node_iter_prev(&tmp, l->b)
+ : bch2_btree_node_iter_prev_all(&tmp, l->b);
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+
+ if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
+ msg = "before";
+ goto err;
+ }
+
+ if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
+ msg = "after";
+ goto err;
+ }
+
+ if (!locked)
+ btree_node_unlock(trans, path, level);
+ return;
+err:
+ bch2_bpos_to_text(&buf1, path->pos);
+
+ if (p) {
+ struct bkey uk = bkey_unpack_key(l->b, p);
+
+ bch2_bkey_to_text(&buf2, &uk);
+ } else {
+ prt_printf(&buf2, "(none)");
+ }
+
+ if (k) {
+ struct bkey uk = bkey_unpack_key(l->b, k);
+
+ bch2_bkey_to_text(&buf3, &uk);
+ } else {
+ prt_printf(&buf3, "(none)");
+ }
+
+ panic("path should be %s key at level %u:\n"
+ "path pos %s\n"
+ "prev key %s\n"
+ "cur key %s\n",
+ msg, level, buf1.buf, buf2.buf, buf3.buf);
+}
+
+static void bch2_btree_path_verify(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ struct bch_fs *c = trans->c;
+ unsigned i;
+
+ EBUG_ON(path->btree_id >= BTREE_ID_NR);
+
+ for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
+ if (!path->l[i].b) {
+ BUG_ON(!path->cached &&
+ bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
+ break;
+ }
+
+ bch2_btree_path_verify_level(trans, path, i);
+ }
+
+ bch2_btree_path_verify_locks(path);
+}
+
+void bch2_trans_verify_paths(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ bch2_btree_path_verify(trans, path);
+}
+
+static void bch2_btree_iter_verify(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+
+ BUG_ON(iter->btree_id >= BTREE_ID_NR);
+
+ BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
+
+ BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+
+ BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
+ (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ !btree_type_has_snapshots(iter->btree_id));
+
+ if (iter->update_path)
+ bch2_btree_path_verify(trans, iter->update_path);
+ bch2_btree_path_verify(trans, iter->path);
+}
+
+static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
+{
+ BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ !iter->pos.snapshot);
+
+ BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ iter->pos.snapshot != iter->snapshot);
+
+ BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
+ bkey_gt(iter->pos, iter->k.p));
+}
+
+static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
+{
+ struct btree_trans *trans = iter->trans;
+ struct btree_iter copy;
+ struct bkey_s_c prev;
+ int ret = 0;
+
+ if (!bch2_debug_check_iterators)
+ return 0;
+
+ if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
+ return 0;
+
+ if (bkey_err(k) || !k.k)
+ return 0;
+
+ BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
+ iter->snapshot,
+ k.k->p.snapshot));
+
+ bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
+ BTREE_ITER_NOPRESERVE|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ prev = bch2_btree_iter_prev(&copy);
+ if (!prev.k)
+ goto out;
+
+ ret = bkey_err(prev);
+ if (ret)
+ goto out;
+
+ if (bkey_eq(prev.k->p, k.k->p) &&
+ bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
+ prev.k->p.snapshot) > 0) {
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+
+ bch2_bkey_to_text(&buf1, k.k);
+ bch2_bkey_to_text(&buf2, prev.k);
+
+ panic("iter snap %u\n"
+ "k %s\n"
+ "prev %s\n",
+ iter->snapshot,
+ buf1.buf, buf2.buf);
+ }
+out:
+ bch2_trans_iter_exit(trans, &copy);
+ return ret;
+}
+
+void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
+ struct bpos pos, bool key_cache)
+{
+ struct btree_path *path;
+ unsigned idx;
+ struct printbuf buf = PRINTBUF;
+
+ btree_trans_sort_paths(trans);
+
+ trans_for_each_path_inorder(trans, path, idx) {
+ int cmp = cmp_int(path->btree_id, id) ?:
+ cmp_int(path->cached, key_cache);
+
+ if (cmp > 0)
+ break;
+ if (cmp < 0)
+ continue;
+
+ if (!btree_node_locked(path, 0) ||
+ !path->should_be_locked)
+ continue;
+
+ if (!key_cache) {
+ if (bkey_ge(pos, path->l[0].b->data->min_key) &&
+ bkey_le(pos, path->l[0].b->key.k.p))
+ return;
+ } else {
+ if (bkey_eq(pos, path->pos))
+ return;
+ }
+ }
+
+ bch2_dump_trans_paths_updates(trans);
+ bch2_bpos_to_text(&buf, pos);
+
+ panic("not locked: %s %s%s\n",
+ bch2_btree_ids[id], buf.buf,
+ key_cache ? " cached" : "");
+}
+
+#else
+
+static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
+ struct btree_path *path, unsigned l) {}
+static inline void bch2_btree_path_verify(struct btree_trans *trans,
+ struct btree_path *path) {}
+static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
+static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
+
+#endif
+
+/* Btree path: fixups after btree updates */
+
+static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
+ struct btree *b,
+ struct bset_tree *t,
+ struct bkey_packed *k)
+{
+ struct btree_node_iter_set *set;
+
+ btree_node_iter_for_each(iter, set)
+ if (set->end == t->end_offset) {
+ set->k = __btree_node_key_to_offset(b, k);
+ bch2_btree_node_iter_sort(iter, b);
+ return;
+ }
+
+ bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
+}
+
+static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
+ struct btree *b,
+ struct bkey_packed *where)
+{
+ struct btree_path_level *l = &path->l[b->c.level];
+
+ if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
+ return;
+
+ if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
+ bch2_btree_node_iter_advance(&l->iter, l->b);
+}
+
+void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
+ struct btree *b,
+ struct bkey_packed *where)
+{
+ struct btree_path *path;
+
+ trans_for_each_path_with_node(trans, b, path) {
+ __bch2_btree_path_fix_key_modified(path, b, where);
+ bch2_btree_path_verify_level(trans, path, b->c.level);
+ }
+}
+
+static void __bch2_btree_node_iter_fix(struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bset_tree *t,
+ struct bkey_packed *where,
+ unsigned clobber_u64s,
+ unsigned new_u64s)
+{
+ const struct bkey_packed *end = btree_bkey_last(b, t);
+ struct btree_node_iter_set *set;
+ unsigned offset = __btree_node_key_to_offset(b, where);
+ int shift = new_u64s - clobber_u64s;
+ unsigned old_end = t->end_offset - shift;
+ unsigned orig_iter_pos = node_iter->data[0].k;
+ bool iter_current_key_modified =
+ orig_iter_pos >= offset &&
+ orig_iter_pos <= offset + clobber_u64s;
+
+ btree_node_iter_for_each(node_iter, set)
+ if (set->end == old_end)
+ goto found;
+
+	/* didn't find the bset in the iterator - might have to re-add it: */
+ if (new_u64s &&
+ bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
+ bch2_btree_node_iter_push(node_iter, b, where, end);
+ goto fixup_done;
+ } else {
+ /* Iterator is after key that changed */
+ return;
+ }
+found:
+ set->end = t->end_offset;
+
+ /* Iterator hasn't gotten to the key that changed yet: */
+ if (set->k < offset)
+ return;
+
+ if (new_u64s &&
+ bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
+ set->k = offset;
+ } else if (set->k < offset + clobber_u64s) {
+ set->k = offset + new_u64s;
+ if (set->k == set->end)
+ bch2_btree_node_iter_set_drop(node_iter, set);
+ } else {
+ /* Iterator is after key that changed */
+ set->k = (int) set->k + shift;
+ return;
+ }
+
+ bch2_btree_node_iter_sort(node_iter, b);
+fixup_done:
+ if (node_iter->data[0].k != orig_iter_pos)
+ iter_current_key_modified = true;
+
+ /*
+ * When a new key is added, and the node iterator now points to that
+ * key, the iterator might have skipped past deleted keys that should
+ * come after the key the iterator now points to. We have to rewind to
+ * before those deleted keys - otherwise
+ * bch2_btree_node_iter_prev_all() breaks:
+ */
+ if (!bch2_btree_node_iter_end(node_iter) &&
+ iter_current_key_modified &&
+ b->c.level) {
+ struct bkey_packed *k, *k2, *p;
+
+ k = bch2_btree_node_iter_peek_all(node_iter, b);
+
+ for_each_bset(b, t) {
+ bool set_pos = false;
+
+ if (node_iter->data[0].end == t->end_offset)
+ continue;
+
+ k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
+
+ while ((p = bch2_bkey_prev_all(b, t, k2)) &&
+ bkey_iter_cmp(b, k, p) < 0) {
+ k2 = p;
+ set_pos = true;
+ }
+
+ if (set_pos)
+ btree_node_iter_set_set_pos(node_iter,
+ b, t, k2);
+ }
+ }
+}
+
+void bch2_btree_node_iter_fix(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bkey_packed *where,
+ unsigned clobber_u64s,
+ unsigned new_u64s)
+{
+ struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
+ struct btree_path *linked;
+
+ if (node_iter != &path->l[b->c.level].iter) {
+ __bch2_btree_node_iter_fix(path, b, node_iter, t,
+ where, clobber_u64s, new_u64s);
+
+ if (bch2_debug_check_iterators)
+ bch2_btree_node_iter_verify(node_iter, b);
+ }
+
+ trans_for_each_path_with_node(trans, b, linked) {
+ __bch2_btree_node_iter_fix(linked, b,
+ &linked->l[b->c.level].iter, t,
+ where, clobber_u64s, new_u64s);
+ bch2_btree_path_verify_level(trans, linked, b->c.level);
+ }
+}
+
+/* Btree path level: pointer to a particular btree node and node iter */
+
+static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
+ struct btree_path_level *l,
+ struct bkey *u,
+ struct bkey_packed *k)
+{
+ if (unlikely(!k)) {
+ /*
+ * signal to bch2_btree_iter_peek_slot() that we're currently at
+ * a hole
+ */
+ u->type = KEY_TYPE_deleted;
+ return bkey_s_c_null;
+ }
+
+ return bkey_disassemble(l->b, k, u);
+}
+
+static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
+ struct btree_path_level *l,
+ struct bkey *u)
+{
+ return __btree_iter_unpack(c, l, u,
+ bch2_btree_node_iter_peek_all(&l->iter, l->b));
+}
+
+static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_path_level *l,
+ struct bkey *u)
+{
+ struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
+ bch2_btree_node_iter_peek(&l->iter, l->b));
+
+ path->pos = k.k ? k.k->p : l->b->key.k.p;
+ trans->paths_sorted = false;
+ bch2_btree_path_verify_level(trans, path, l - path->l);
+ return k;
+}
+
+static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_path_level *l,
+ struct bkey *u)
+{
+ struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
+ bch2_btree_node_iter_prev(&l->iter, l->b));
+
+ path->pos = k.k ? k.k->p : l->b->data->min_key;
+ trans->paths_sorted = false;
+ bch2_btree_path_verify_level(trans, path, l - path->l);
+ return k;
+}
+
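+/*
+ * Advance the node iterator to the first key >= path->pos; returns false if
+ * that would take more than max_advance steps, presumably so the caller can
+ * fall back to a full search of the node instead:
+ */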
+static inline bool btree_path_advance_to_pos(struct btree_path *path,
+ struct btree_path_level *l,
+ int max_advance)
+{
+ struct bkey_packed *k;
+ int nr_advanced = 0;
+
+ while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
+ bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
+ if (max_advance > 0 && nr_advanced >= max_advance)
+ return false;
+
+ bch2_btree_node_iter_advance(&l->iter, l->b);
+ nr_advanced++;
+ }
+
+ return true;
+}
+
+static inline void __btree_path_level_init(struct btree_path *path,
+ unsigned level)
+{
+ struct btree_path_level *l = &path->l[level];
+
+ bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
+
+ /*
+ * Iterators to interior nodes should always be pointed at the first non
+	 * Iterators to interior nodes should always be pointed at the first
+	 * non-whiteout:
+ if (level)
+ bch2_btree_node_iter_peek(&l->iter, l->b);
+}
+
+void bch2_btree_path_level_init(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
+{
+ BUG_ON(path->cached);
+
+ EBUG_ON(!btree_path_pos_in_node(path, b));
+
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
+ path->l[b->c.level].b = b;
+ __btree_path_level_init(path, b->c.level);
+}
+
+/* Btree path: fixups after btree node updates: */
+
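+/*
+ * Pending updates in this transaction cache pointers to the old values they
+ * overwrite; after a node is modified or replaced those pointers may be
+ * stale, so refresh them (also rechecking the journal during replay):
+ */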
+static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ if (!i->cached &&
+ i->level == b->c.level &&
+ i->btree_id == b->c.btree_id &&
+ bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
+ bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
+ i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;
+
+ if (unlikely(trans->journal_replay_not_finished)) {
+ struct bkey_i *j_k =
+ bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
+ i->k->k.p);
+
+ if (j_k) {
+ i->old_k = j_k->k;
+ i->old_v = &j_k->v;
+ }
+ }
+ }
+}
+
+/*
+ * A btree node is being replaced - update the iterator to point to the new
+ * node:
+ */
+void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->uptodate == BTREE_ITER_UPTODATE &&
+ !path->cached &&
+ btree_path_pos_in_node(path, b)) {
+ enum btree_node_locked_type t =
+ btree_lock_want(path, b->c.level);
+
+ if (t != BTREE_NODE_UNLOCKED) {
+ btree_node_unlock(trans, path, b->c.level);
+ six_lock_increment(&b->c.lock, (enum six_lock_type) t);
+ mark_btree_node_locked(trans, path, b->c.level, t);
+ }
+
+ bch2_btree_path_level_init(trans, path, b);
+ }
+
+ bch2_trans_revalidate_updates_in_node(trans, b);
+}
+
+/*
+ * A btree node has been modified in such a way as to invalidate iterators - fix
+ * them:
+ */
+void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
+{
+ struct btree_path *path;
+
+ trans_for_each_path_with_node(trans, b, path)
+ __btree_path_level_init(path, b->c.level);
+
+ bch2_trans_revalidate_updates_in_node(trans, b);
+}
+
+/* Btree path: traverse, set_pos: */
+
+static inline int btree_path_lock_root(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned depth_want,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
+ enum six_lock_type lock_type;
+ unsigned i;
+ int ret;
+
+ EBUG_ON(path->nodes_locked);
+
+ while (1) {
+ b = READ_ONCE(*rootp);
+ path->level = READ_ONCE(b->c.level);
+
+ if (unlikely(path->level < depth_want)) {
+ /*
+ * the root is at a lower depth than the depth we want:
+ * got to the end of the btree, or we're walking nodes
+ * greater than some depth and there are no nodes >=
+ * that depth
+ */
+ path->level = depth_want;
+ for (i = path->level; i < BTREE_MAX_DEPTH; i++)
+ path->l[i].b = NULL;
+ return 1;
+ }
+
+ lock_type = __btree_lock_want(path, path->level);
+ ret = btree_node_lock(trans, path, &b->c,
+ path->level, lock_type, trace_ip);
+ if (unlikely(ret)) {
+ if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
+ continue;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
+ BUG();
+ }
+
+ if (likely(b == READ_ONCE(*rootp) &&
+ b->c.level == path->level &&
+ !race_fault())) {
+ for (i = 0; i < path->level; i++)
+ path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
+ path->l[path->level].b = b;
+ for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
+ path->l[i].b = NULL;
+
+ mark_btree_node_locked(trans, path, path->level,
+ (enum btree_node_locked_type) lock_type);
+ bch2_btree_path_level_init(trans, path, b);
+ return 0;
+ }
+
+ six_unlock_type(&b->c.lock, lock_type);
+ }
+}
+
+noinline
+static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l = path_l(path);
+ struct btree_node_iter node_iter = l->iter;
+ struct bkey_packed *k;
+ struct bkey_buf tmp;
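+	/* prefetch more aggressively until the filesystem has started up: */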
+ unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
+ ? (path->level > 1 ? 0 : 2)
+ : (path->level > 1 ? 1 : 16);
+ bool was_locked = btree_node_locked(path, path->level);
+ int ret = 0;
+
+ bch2_bkey_buf_init(&tmp);
+
+ while (nr-- && !ret) {
+ if (!bch2_btree_node_relock(trans, path, path->level))
+ break;
+
+ bch2_btree_node_iter_advance(&node_iter, l->b);
+ k = bch2_btree_node_iter_peek(&node_iter, l->b);
+ if (!k)
+ break;
+
+ bch2_bkey_buf_unpack(&tmp, c, l->b, k);
+ ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
+ path->level - 1);
+ }
+
+ if (!was_locked)
+ btree_node_unlock(trans, path, path->level);
+
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
+}
+
+static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
+ struct btree_and_journal_iter *jiter)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ struct bkey_buf tmp;
+ unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
+ ? (path->level > 1 ? 0 : 2)
+ : (path->level > 1 ? 1 : 16);
+ bool was_locked = btree_node_locked(path, path->level);
+ int ret = 0;
+
+ bch2_bkey_buf_init(&tmp);
+
+ while (nr-- && !ret) {
+ if (!bch2_btree_node_relock(trans, path, path->level))
+ break;
+
+ bch2_btree_and_journal_iter_advance(jiter);
+ k = bch2_btree_and_journal_iter_peek(jiter);
+ if (!k.k)
+ break;
+
+ bch2_bkey_buf_reassemble(&tmp, c, k);
+ ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
+ path->level - 1);
+ }
+
+ if (!was_locked)
+ btree_node_unlock(trans, path, path->level);
+
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
+}
+
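+/*
+ * btree_ptr_v2 keys have a scratch mem_ptr field: cache a pointer to the
+ * in-memory child node in the parent's key, to make future lookups cheaper:
+ */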
+static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned plevel, struct btree *b)
+{
+ struct btree_path_level *l = &path->l[plevel];
+ bool locked = btree_node_locked(path, plevel);
+ struct bkey_packed *k;
+ struct bch_btree_ptr_v2 *bp;
+
+ if (!bch2_btree_node_relock(trans, path, plevel))
+ return;
+
+ k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+
+ bp = (void *) bkeyp_val(&l->b->format, k);
+ bp->mem_ptr = (unsigned long)b;
+
+ if (!locked)
+ btree_node_unlock(trans, path, plevel);
+}
+
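+/*
+ * Until journal replay has finished, a child node's key may only exist in the
+ * journal: peek through the node and journal iterators together:
+ */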
+static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
+ struct bkey_buf *out)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l = path_l(path);
+ struct btree_and_journal_iter jiter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
+
+ k = bch2_btree_and_journal_iter_peek(&jiter);
+
+ bch2_bkey_buf_reassemble(out, c, k);
+
+ if (flags & BTREE_ITER_PREFETCH)
+ ret = btree_path_prefetch_j(trans, path, &jiter);
+
+ bch2_btree_and_journal_iter_exit(&jiter);
+ return ret;
+}
+
+static __always_inline int btree_path_down(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path_level *l = path_l(path);
+ struct btree *b;
+ unsigned level = path->level - 1;
+ enum six_lock_type lock_type = __btree_lock_want(path, level);
+ struct bkey_buf tmp;
+ int ret;
+
+ EBUG_ON(!btree_node_locked(path, path->level));
+
+ bch2_bkey_buf_init(&tmp);
+
+ if (unlikely(trans->journal_replay_not_finished)) {
+ ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
+ if (ret)
+ goto err;
+ } else {
+ bch2_bkey_buf_unpack(&tmp, c, l->b,
+ bch2_btree_node_iter_peek(&l->iter, l->b));
+
+ if (flags & BTREE_ITER_PREFETCH) {
+ ret = btree_path_prefetch(trans, path);
+ if (ret)
+ goto err;
+ }
+ }
+
+ b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
+ ret = PTR_ERR_OR_ZERO(b);
+ if (unlikely(ret))
+ goto err;
+
+ if (likely(!trans->journal_replay_not_finished &&
+ tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
+ unlikely(b != btree_node_mem_ptr(tmp.k)))
+ btree_node_mem_ptr_set(trans, path, level + 1, b);
+
+ if (btree_node_read_locked(path, level + 1))
+ btree_node_unlock(trans, path, level + 1);
+
+ mark_btree_node_locked(trans, path, level,
+ (enum btree_node_locked_type) lock_type);
+ path->level = level;
+ bch2_btree_path_level_init(trans, path, b);
+
+ bch2_btree_path_verify_locks(path);
+err:
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
+}
+
+static int bch2_btree_path_traverse_all(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+ unsigned long trace_ip = _RET_IP_;
+ int i, ret = 0;
+
+ if (trans->in_traverse_all)
+ return -BCH_ERR_transaction_restart_in_traverse_all;
+
+ trans->in_traverse_all = true;
+retry_all:
+ trans->restarted = 0;
+ trans->last_restarted_ip = 0;
+
+ trans_for_each_path(trans, path)
+ path->should_be_locked = false;
+
+ btree_trans_sort_paths(trans);
+
+ bch2_trans_unlock(trans);
+ cond_resched();
+
+ if (unlikely(trans->memory_allocation_failure)) {
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ do {
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+ closure_sync(&cl);
+ } while (ret);
+ }
+
+ /* Now, redo traversals in correct order: */
+ i = 0;
+ while (i < trans->nr_sorted) {
+ path = trans->paths + trans->sorted[i];
+
+ /*
+ * Traversing a path can cause another path to be added at about
+ * the same position:
+ */
+ if (path->uptodate) {
+ __btree_path_get(path, false);
+ ret = bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_);
+ __btree_path_put(path, false);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, ENOMEM))
+ goto retry_all;
+ if (ret)
+ goto err;
+ } else {
+ i++;
+ }
+ }
+
+ /*
+ * We used to assert that all paths had been traversed here
+ * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
+ * path->should_be_locked is not set yet, we might have unlocked and
+ * then failed to relock a path - that's fine.
+ */
+err:
+ bch2_btree_cache_cannibalize_unlock(c);
+
+ trans->in_traverse_all = false;
+
+ trace_and_count(c, trans_traverse_all, trans, trace_ip);
+ return ret;
+}
+
+static inline bool btree_path_check_pos_in_node(struct btree_path *path,
+ unsigned l, int check_pos)
+{
+ if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
+ return false;
+ if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
+ return false;
+ return true;
+}
+
+static inline bool btree_path_good_node(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned l, int check_pos)
+{
+ return is_btree_node(path, l) &&
+ bch2_btree_node_relock(trans, path, l) &&
+ btree_path_check_pos_in_node(path, l, check_pos);
+}
+
+static void btree_path_set_level_down(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_level)
+{
+ unsigned l;
+
+ path->level = new_level;
+
+ for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
+ if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
+ btree_node_unlock(trans, path, l);
+
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ bch2_btree_path_verify(trans, path);
+}
+
+static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
+ struct btree_path *path,
+ int check_pos)
+{
+ unsigned i, l = path->level;
+again:
+ while (btree_path_node(path, l) &&
+ !btree_path_good_node(trans, path, l, check_pos))
+ __btree_path_set_level_up(trans, path, l++);
+
+ /* If we need intent locks, take them too: */
+ for (i = l + 1;
+ i < path->locks_want && btree_path_node(path, i);
+ i++)
+ if (!bch2_btree_node_relock(trans, path, i)) {
+ while (l <= i)
+ __btree_path_set_level_up(trans, path, l++);
+ goto again;
+ }
+
+ return l;
+}
+
+static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
+ struct btree_path *path,
+ int check_pos)
+{
+ return likely(btree_node_locked(path, path->level) &&
+ btree_path_check_pos_in_node(path, path->level, check_pos))
+ ? path->level
+ : __btree_path_up_until_good_node(trans, path, check_pos);
+}
+
+/*
+ * This is the main state machine for walking down the btree - walks down to a
+ * specified depth
+ *
+ * Returns 0 on success, or a negative error code: -EIO if reading a btree
+ * node failed, or a transaction restart error.
+ *
+ * On error, caller (peek_node()/peek_key()) must return NULL; the error is
+ * stashed in the iterator.
+ */
+int bch2_btree_path_traverse_one(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags,
+ unsigned long trace_ip)
+{
+ unsigned depth_want = path->level;
+ int ret = -((int) trans->restarted);
+
+ if (unlikely(ret))
+ goto out;
+
+ /*
+ * Ensure we obey path->should_be_locked: if it's set, we can't unlock
+ * and re-traverse the path without a transaction restart:
+ */
+ if (path->should_be_locked) {
+ ret = bch2_btree_path_relock(trans, path, trace_ip);
+ goto out;
+ }
+
+ if (path->cached) {
+ ret = bch2_btree_path_traverse_cached(trans, path, flags);
+ goto out;
+ }
+
+ if (unlikely(path->level >= BTREE_MAX_DEPTH))
+ goto out;
+
+ path->level = btree_path_up_until_good_node(trans, path, 0);
+
+ EBUG_ON(btree_path_node(path, path->level) &&
+ !btree_node_locked(path, path->level));
+
+ /*
+	 * Note: path->l[path->level].b may be temporarily NULL here - that
+	 * would normally indicate to other code that we got to the end of the
+	 * btree; here it indicates that relocking the root failed - it's critical that
+ * btree_path_lock_root() comes next and that it can't fail
+ */
+ while (path->level > depth_want) {
+ ret = btree_path_node(path, path->level)
+ ? btree_path_down(trans, path, flags, trace_ip)
+ : btree_path_lock_root(trans, path, depth_want, trace_ip);
+ if (unlikely(ret)) {
+ if (ret == 1) {
+ /*
+ * No nodes at this level - got to the end of
+ * the btree:
+ */
+ ret = 0;
+ goto out;
+ }
+
+ __bch2_btree_path_unlock(trans, path);
+ path->level = depth_want;
+ path->l[path->level].b = ERR_PTR(ret);
+ goto out;
+ }
+ }
+
+ path->uptodate = BTREE_ITER_UPTODATE;
+out:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
+ panic("ret %s (%i) trans->restarted %s (%i)\n",
+ bch2_err_str(ret), ret,
+ bch2_err_str(trans->restarted), trans->restarted);
+ bch2_btree_path_verify(trans, path);
+ return ret;
+}
+
+static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
+ struct btree_path *src)
+{
+ unsigned i, offset = offsetof(struct btree_path, pos);
+
+ memcpy((void *) dst + offset,
+ (void *) src + offset,
+ sizeof(struct btree_path) - offset);
+
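+	/*
+	 * The new path shares the src path's locks: bump each locked node's
+	 * lock count so the locks aren't dropped early:
+	 */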
+ for (i = 0; i < BTREE_MAX_DEPTH; i++) {
+ unsigned t = btree_node_locked_type(dst, i);
+
+ if (t != BTREE_NODE_UNLOCKED)
+ six_lock_increment(&dst->l[i].b->c.lock, t);
+ }
+}
+
+static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
+ bool intent)
+{
+ struct btree_path *new = btree_path_alloc(trans, src);
+
+ btree_path_copy(trans, new, src);
+ __btree_path_get(new, intent);
+ return new;
+}
+
+__flatten
+struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
+ struct btree_path *path, bool intent,
+ unsigned long ip)
+{
+ __btree_path_put(path, intent);
+ path = btree_path_clone(trans, path, intent);
+ path->preserve = false;
+ return path;
+}
+
+struct btree_path * __must_check
+__bch2_btree_path_set_pos(struct btree_trans *trans,
+ struct btree_path *path, struct bpos new_pos,
+ bool intent, unsigned long ip, int cmp)
+{
+ unsigned level = path->level;
+
+ bch2_trans_verify_not_in_restart(trans);
+ EBUG_ON(!path->ref);
+
+ path = bch2_btree_path_make_mut(trans, path, intent, ip);
+
+ path->pos = new_pos;
+ trans->paths_sorted = false;
+
+ if (unlikely(path->cached)) {
+ btree_node_unlock(trans, path, 0);
+ path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ goto out;
+ }
+
+ level = btree_path_up_until_good_node(trans, path, cmp);
+
+ if (btree_path_node(path, level)) {
+ struct btree_path_level *l = &path->l[level];
+
+ BUG_ON(!btree_node_locked(path, level));
+ /*
+ * We might have to skip over many keys, or just a few: try
+ * advancing the node iterator, and if we have to skip over too
+ * many keys just reinit it (or if we're rewinding, since that
+ * is expensive).
+ */
+ if (cmp < 0 ||
+ !btree_path_advance_to_pos(path, l, 8))
+ bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
+
+ /*
+		 * Iterators to interior nodes should always be pointed at the
+		 * first non-whiteout:
+ */
+ if (unlikely(level))
+ bch2_btree_node_iter_peek(&l->iter, l->b);
+ }
+
+ if (unlikely(level != path->level)) {
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ __bch2_btree_path_unlock(trans, path);
+ }
+out:
+ bch2_btree_path_verify(trans, path);
+ return path;
+}
+
+/* Btree path: main interface: */
+
+static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
+{
+ struct btree_path *sib;
+
+ sib = prev_btree_path(trans, path);
+ if (sib && !btree_path_cmp(sib, path))
+ return sib;
+
+ sib = next_btree_path(trans, path);
+ if (sib && !btree_path_cmp(sib, path))
+ return sib;
+
+ return NULL;
+}
+
+static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
+{
+ struct btree_path *sib;
+
+ sib = prev_btree_path(trans, path);
+ if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
+ return sib;
+
+ sib = next_btree_path(trans, path);
+ if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
+ return sib;
+
+ return NULL;
+}
+
+static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
+{
+ __bch2_btree_path_unlock(trans, path);
+ btree_path_list_remove(trans, path);
+ trans->paths_allocated &= ~(1ULL << path->idx);
+}
+
+void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
+{
+ struct btree_path *dup;
+
+ EBUG_ON(trans->paths + path->idx != path);
+ EBUG_ON(!path->ref);
+
+ if (!__btree_path_put(path, intent))
+ return;
+
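+	/*
+	 * Before freeing the path, look for a sibling path that this path's
+	 * preserve/should_be_locked flags can be transferred to:
+	 */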
+ dup = path->preserve
+ ? have_path_at_pos(trans, path)
+ : have_node_at_pos(trans, path);
+
+	if (!dup && (path->preserve || is_btree_node(path, path->level)))
+ return;
+
+ if (path->should_be_locked &&
+ !trans->restarted &&
+ (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
+ return;
+
+ if (dup) {
+ dup->preserve |= path->preserve;
+ dup->should_be_locked |= path->should_be_locked;
+ }
+
+ __bch2_path_free(trans, path);
+}
+
+static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path,
+ bool intent)
+{
+ EBUG_ON(trans->paths + path->idx != path);
+ EBUG_ON(!path->ref);
+
+ if (!__btree_path_put(path, intent))
+ return;
+
+ __bch2_path_free(trans, path);
+}
+
+void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
+{
+ panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
+ trans->restart_count, restart_count,
+ (void *) trans->last_begin_ip);
+}
+
+void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
+{
+ panic("in transaction restart: %s, last restarted by %pS\n",
+ bch2_err_str(trans->restarted),
+ (void *) trans->last_restarted_ip);
+}
+
+noinline __cold
+void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+ struct btree_write_buffered_key *wb;
+
+ prt_printf(buf, "transaction updates for %s journal seq %llu",
+ trans->fn, trans->journal_res.seq);
+ prt_newline(buf);
+ printbuf_indent_add(buf, 2);
+
+ trans_for_each_update(trans, i) {
+ struct bkey_s_c old = { &i->old_k, i->old_v };
+
+ prt_printf(buf, "update: btree=%s cached=%u %pS",
+ bch2_btree_ids[i->btree_id],
+ i->cached,
+ (void *) i->ip_allocated);
+ prt_newline(buf);
+
+ prt_printf(buf, " old ");
+ bch2_bkey_val_to_text(buf, trans->c, old);
+ prt_newline(buf);
+
+ prt_printf(buf, " new ");
+ bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
+ prt_newline(buf);
+ }
+
+ trans_for_each_wb_update(trans, wb) {
+ prt_printf(buf, "update: btree=%s wb=1 %pS",
+ bch2_btree_ids[wb->btree],
+ (void *) i->ip_allocated);
+ prt_newline(buf);
+
+ prt_printf(buf, " new ");
+ bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(&wb->k));
+ prt_newline(buf);
+ }
+
+ printbuf_indent_sub(buf, 2);
+}
+
+noinline __cold
+void bch2_dump_trans_updates(struct btree_trans *trans)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_trans_updates_to_text(&buf, trans);
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+}
+
+noinline __cold
+void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
+{
+ prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
+ path->idx, path->ref, path->intent_ref,
+ path->preserve ? 'P' : ' ',
+ path->should_be_locked ? 'S' : ' ',
+ bch2_btree_ids[path->btree_id],
+ path->level);
+ bch2_bpos_to_text(out, path->pos);
+
+ prt_printf(out, " locks %u", path->nodes_locked);
+#ifdef TRACK_PATH_ALLOCATED
+ prt_printf(out, " %pS", (void *) path->ip_allocated);
+#endif
+ prt_newline(out);
+}
+
+static noinline __cold
+void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
+ bool nosort)
+{
+ struct btree_path *path;
+ unsigned idx;
+
+ if (!nosort)
+ btree_trans_sort_paths(trans);
+
+ trans_for_each_path_inorder(trans, path, idx)
+ bch2_btree_path_to_text(out, path);
+}
+
+noinline __cold
+void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
+{
+ __bch2_trans_paths_to_text(out, trans, false);
+}
+
+static noinline __cold
+void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
+{
+ struct printbuf buf = PRINTBUF;
+
+ __bch2_trans_paths_to_text(&buf, trans, nosort);
+ bch2_trans_updates_to_text(&buf, trans);
+
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+}
+
+noinline __cold
+void bch2_dump_trans_paths_updates(struct btree_trans *trans)
+{
+ __bch2_dump_trans_paths_updates(trans, false);
+}
+
+noinline __cold
+static void bch2_trans_update_max_paths(struct btree_trans *trans)
+{
+ struct btree_transaction_stats *s = btree_trans_stats(trans);
+ struct printbuf buf = PRINTBUF;
+
+ if (!s)
+ return;
+
+ bch2_trans_paths_to_text(&buf, trans);
+
+ if (!buf.allocation_failure) {
+ mutex_lock(&s->lock);
+ if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
+ s->nr_max_paths = trans->nr_max_paths =
+ hweight64(trans->paths_allocated);
+ swap(s->max_paths_text, buf.buf);
+ }
+ mutex_unlock(&s->lock);
+ }
+
+ printbuf_exit(&buf);
+
+ trans->nr_max_paths = hweight64(trans->paths_allocated);
+}
+
+static noinline void btree_path_overflow(struct btree_trans *trans)
+{
+ bch2_dump_trans_paths_updates(trans);
+ panic("trans path overflow\n");
+}
+
+static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
+ struct btree_path *pos)
+{
+ struct btree_path *path;
+ unsigned idx;
+
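+	/*
+	 * The mask below is all ones in the low BTREE_ITER_MAX bits - i.e.
+	 * every path slot allocated; it's written as two shifts to avoid an
+	 * undefined 64-bit shift when BTREE_ITER_MAX == 64:
+	 */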
+ if (unlikely(trans->paths_allocated ==
+ ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
+ btree_path_overflow(trans);
+
+ idx = __ffs64(~trans->paths_allocated);
+
+ /*
+ * Do this before marking the new path as allocated, since it won't be
+ * initialized yet:
+ */
+ if (unlikely(idx > trans->nr_max_paths))
+ bch2_trans_update_max_paths(trans);
+
+ trans->paths_allocated |= 1ULL << idx;
+
+ path = &trans->paths[idx];
+ path->idx = idx;
+ path->ref = 0;
+ path->intent_ref = 0;
+ path->nodes_locked = 0;
+
+ btree_path_list_add(trans, pos, path);
+ trans->paths_sorted = false;
+ return path;
+}
+
+struct btree_path *bch2_path_get(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos,
+ unsigned locks_want, unsigned level,
+ unsigned flags, unsigned long ip)
+{
+ struct btree_path *path, *path_pos = NULL;
+ bool cached = flags & BTREE_ITER_CACHED;
+ bool intent = flags & BTREE_ITER_INTENT;
+ int i;
+
+ bch2_trans_verify_not_in_restart(trans);
+ bch2_trans_verify_locks(trans);
+
+ btree_trans_sort_paths(trans);
+
+ trans_for_each_path_inorder(trans, path, i) {
+ if (__btree_path_cmp(path,
+ btree_id,
+ cached,
+ pos,
+ level) > 0)
+ break;
+
+ path_pos = path;
+ }
+
+ if (path_pos &&
+ path_pos->cached == cached &&
+ path_pos->btree_id == btree_id &&
+ path_pos->level == level) {
+ __btree_path_get(path_pos, intent);
+ path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
+ } else {
+ path = btree_path_alloc(trans, path_pos);
+ path_pos = NULL;
+
+ __btree_path_get(path, intent);
+ path->pos = pos;
+ path->btree_id = btree_id;
+ path->cached = cached;
+ path->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ path->should_be_locked = false;
+ path->level = level;
+ path->locks_want = locks_want;
+ path->nodes_locked = 0;
+ for (i = 0; i < ARRAY_SIZE(path->l); i++)
+ path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
+#ifdef TRACK_PATH_ALLOCATED
+ path->ip_allocated = ip;
+#endif
+ trans->paths_sorted = false;
+ }
+
+ if (!(flags & BTREE_ITER_NOPRESERVE))
+ path->preserve = true;
+
+ if (path->intent_ref)
+ locks_want = max(locks_want, level + 1);
+
+ /*
+ * If the path has locks_want greater than requested, we don't downgrade
+ * it here - on transaction restart because btree node split needs to
+ * upgrade locks, we might be putting/getting the iterator again.
+ * Downgrading iterators only happens via bch2_trans_downgrade(), after
+ * a successful transaction commit.
+ */
+
+ locks_want = min(locks_want, BTREE_MAX_DEPTH);
+ if (locks_want > path->locks_want)
+ bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);
+
+ return path;
+}
+
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
+{
+ struct btree_path_level *l = path_l(path);
+ struct bkey_packed *_k;
+ struct bkey_s_c k;
+
+ if (unlikely(!l->b))
+ return bkey_s_c_null;
+
+ EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
+ EBUG_ON(!btree_node_locked(path, path->level));
+
+ if (!path->cached) {
+ _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+ k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
+
+ EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
+
+ if (!k.k || !bpos_eq(path->pos, k.k->p))
+ goto hole;
+ } else {
+ struct bkey_cached *ck = (void *) path->l[0].b;
+
+ EBUG_ON(ck &&
+ (path->btree_id != ck->key.btree_id ||
+ !bkey_eq(path->pos, ck->key.pos)));
+ if (!ck || !ck->valid)
+ return bkey_s_c_null;
+
+ *u = ck->k->k;
+ k = bkey_i_to_s_c(ck->k);
+ }
+
+ return k;
+hole:
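+	/* No key at this position: return a synthesized, zeroed-out key: */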
+ bkey_init(u);
+ u->p = path->pos;
+ return (struct bkey_s_c) { u, NULL };
+}
+
+/* Btree iterators: */
+
+int __must_check
+__bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+ return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+}
+
+int __must_check
+bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+ int ret;
+
+ iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
+ btree_iter_search_key(iter),
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ if (ret)
+ return ret;
+
+ btree_path_set_should_be_locked(iter->path);
+ return 0;
+}
+
+/* Iterate across nodes (leaf and interior nodes) */
+
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct btree *b = NULL;
+ int ret;
+
+ EBUG_ON(iter->path->cached);
+ bch2_btree_iter_verify(iter);
+
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (ret)
+ goto err;
+
+ b = btree_path_node(iter->path, iter->path->level);
+ if (!b)
+ goto out;
+
+ BUG_ON(bpos_lt(b->key.k.p, iter->pos));
+
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = b->key.k.p;
+
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+ btree_path_set_should_be_locked(iter->path);
+out:
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+
+ return b;
+err:
+ b = ERR_PTR(ret);
+ goto out;
+}
+
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
+{
+ struct btree *b;
+
+ while (b = bch2_btree_iter_peek_node(iter),
+ bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
+ bch2_trans_begin(iter->trans);
+
+ return b;
+}
+
+struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct btree_path *path = iter->path;
+ struct btree *b = NULL;
+ int ret;
+
+ bch2_trans_verify_not_in_restart(trans);
+ EBUG_ON(iter->path->cached);
+ bch2_btree_iter_verify(iter);
+
+ /* already at end? */
+ if (!btree_path_node(path, path->level))
+ return NULL;
+
+ /* got to end? */
+ if (!btree_path_node(path, path->level + 1)) {
+ btree_path_set_level_up(trans, path);
+ return NULL;
+ }
+
+ if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
+ __bch2_btree_path_unlock(trans, path);
+ path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
+ path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ goto err;
+ }
+
+ b = btree_path_node(path, path->level + 1);
+
+ if (bpos_eq(iter->pos, b->key.k.p)) {
+ __btree_path_set_level_up(trans, path, path->level++);
+ } else {
+ /*
+ * Haven't gotten to the end of the parent node: go back down to
+ * the next child node
+ */
+ path = iter->path =
+ bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ btree_path_set_level_down(trans, path, iter->min_depth);
+
+ ret = bch2_btree_path_traverse(trans, path, iter->flags);
+ if (ret)
+ goto err;
+
+ b = path->l[path->level].b;
+ }
+
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos = b->key.k.p;
+
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+ btree_path_set_should_be_locked(iter->path);
+ BUG_ON(iter->path->uptodate);
+out:
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+
+ return b;
+err:
+ b = ERR_PTR(ret);
+ goto out;
+}
+
+/* Iterate across keys (in leaf nodes only) */
+
+inline bool bch2_btree_iter_advance(struct btree_iter *iter)
+{
+ if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
+ struct bpos pos = iter->k.p;
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, SPOS_MAX)
+ : bkey_eq(pos, SPOS_MAX));
+
+ if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ pos = bkey_successor(iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
+ return ret;
+ } else {
+ if (!btree_path_node(iter->path, iter->path->level))
+ return true;
+
+ iter->advanced = true;
+ return false;
+ }
+}
+
+inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
+{
+ struct bpos pos = bkey_start_pos(&iter->k);
+ bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ ? bpos_eq(pos, POS_MIN)
+ : bkey_eq(pos, POS_MIN));
+
+ if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ pos = bkey_predecessor(iter, pos);
+ bch2_btree_iter_set_pos(iter, pos);
+ return ret;
+}
+
+static noinline
+struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
+{
+ struct btree_insert_entry *i;
+ struct bkey_i *ret = NULL;
+
+ trans_for_each_update(iter->trans, i) {
+ if (i->btree_id < iter->btree_id)
+ continue;
+ if (i->btree_id > iter->btree_id)
+ break;
+ if (bpos_lt(i->k->k.p, iter->path->pos))
+ continue;
+ if (i->key_cache_already_flushed)
+ continue;
+ if (!ret || bpos_lt(i->k->k.p, ret->k.p))
+ ret = i->k;
+ }
+
+ return ret;
+}
+
+static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
+{
+ return iter->flags & BTREE_ITER_WITH_UPDATES
+ ? __bch2_btree_trans_peek_updates(iter)
+ : NULL;
+}
+
+static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end_pos)
+{
+ struct bkey_i *k;
+
+ if (bpos_lt(iter->path->pos, iter->journal_pos))
+ iter->journal_idx = 0;
+
+ k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
+ iter->path->level,
+ iter->path->pos,
+ end_pos,
+ &iter->journal_idx);
+
+ iter->journal_pos = k ? k->k.p : end_pos;
+ return k;
+}
+
+static noinline
+struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos);
+
+ if (k) {
+ iter->k = k->k;
+ return bkey_i_to_s_c(k);
+ } else {
+ return bkey_s_c_null;
+ }
+}
+
+static noinline
+struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bkey_i *next_journal =
+ bch2_btree_journal_peek(trans, iter,
+ k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
+
+ if (next_journal) {
+ iter->k = next_journal->k;
+ k = bkey_i_to_s_c(next_journal);
+ }
+
+ return k;
+}
+
+/*
+ * Checks btree key cache for key at iter->pos and returns it if present, or
+ * bkey_s_c_null:
+ */
+static noinline
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bch_fs *c = trans->c;
+ struct bkey u;
+ struct bkey_s_c k;
+ int ret;
+
+ if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
+ bpos_eq(iter->pos, pos))
+ return bkey_s_c_null;
+
+ if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
+ return bkey_s_c_null;
+
+ if (!iter->key_cache_path)
+ iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
+ iter->flags & BTREE_ITER_INTENT, 0,
+ iter->flags|BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL,
+ _THIS_IP_);
+
+ iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
+ iter->flags|BTREE_ITER_CACHED) ?:
+ bch2_btree_path_relock(trans, iter->path, _THIS_IP_);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ btree_path_set_should_be_locked(iter->key_cache_path);
+
+ k = bch2_btree_path_peek_slot(iter->key_cache_path, &u);
+ if (k.k && !bkey_err(k)) {
+ iter->k = u;
+ k.k = &iter->k;
+ }
+ return k;
+}
+
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bkey_i *next_update;
+ struct bkey_s_c k, k2;
+ int ret;
+
+ EBUG_ON(iter->path->cached);
+ bch2_btree_iter_verify(iter);
+
+ while (1) {
+ struct btree_path_level *l;
+
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret)) {
+ /* ensure that iter->k is consistent with iter->pos: */
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ goto out;
+ }
+
+ l = path_l(iter->path);
+
+ if (unlikely(!l->b)) {
+ /* No btree nodes at requested level: */
+ bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ k = bkey_s_c_null;
+ goto out;
+ }
+
+ btree_path_set_should_be_locked(iter->path);
+
+ k = btree_path_level_peek_all(trans->c, l, &iter->k);
+
+ if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ k.k &&
+ (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ k = k2;
+ ret = bkey_err(k);
+ if (ret) {
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ goto out;
+ }
+ }
+
+ if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
+ k = btree_trans_peek_journal(trans, iter, k);
+
+ next_update = btree_trans_peek_updates(iter);
+
+ if (next_update &&
+ bpos_le(next_update->k.p,
+ k.k ? k.k->p : l->b->key.k.p)) {
+ iter->k = next_update->k;
+ k = bkey_i_to_s_c(next_update);
+ }
+
+ if (k.k && bkey_deleted(k.k)) {
+ /*
+ * If we've got a whiteout, and it's after the search
+ * key, advance the search key to the whiteout instead
+ * of just after the whiteout - it might be a btree
+ * whiteout, with a real key at the same position, since
+ * in the btree deleted keys sort before non deleted.
+ */
+ search_key = !bpos_eq(search_key, k.k->p)
+ ? k.k->p
+ : bpos_successor(k.k->p);
+ continue;
+ }
+
+ if (likely(k.k)) {
+ break;
+ } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
+ /* Advance to next leaf node: */
+ search_key = bpos_successor(l->b->key.k.p);
+ } else {
+ /* End of btree: */
+ bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ k = bkey_s_c_null;
+ goto out;
+ }
+ }
+out:
+ bch2_btree_iter_verify(iter);
+
+ return k;
+}
+
+/**
+ * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
+ * iterator's current position
+ * @iter: iterator to peek from
+ * @end: search limit: returns keys less than or equal to @end
+ *
+ * Returns: key if found, or an error extractable with bkey_err().
+ */
+struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key = btree_iter_search_key(iter);
+ struct bkey_s_c k;
+ struct bpos iter_pos;
+ int ret;
+
+ EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
+ EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
+
+ if (iter->update_path) {
+ bch2_path_put_nokeep(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+ }
+
+ bch2_btree_iter_verify_entry_exit(iter);
+
+ while (1) {
+ k = __bch2_btree_iter_peek(iter, search_key);
+ if (unlikely(!k.k))
+ goto end;
+ if (unlikely(bkey_err(k)))
+ goto out_no_locked;
+
+ /*
+		 * iter->pos should be monotonically increasing, and always be
+ * equal to the key we just returned - except extents can
+ * straddle iter->pos:
+ */
+ if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ iter_pos = k.k->p;
+ else
+ iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
+
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(iter_pos, end)
+ : bkey_ge(iter_pos, end)))
+ goto end;
+
+ if (iter->update_path &&
+ !bkey_eq(iter->update_path->pos, k.k->p)) {
+ bch2_path_put_nokeep(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+ }
+
+ if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ (iter->flags & BTREE_ITER_INTENT) &&
+ !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ !iter->update_path) {
+ struct bpos pos = k.k->p;
+
+ if (pos.snapshot < iter->snapshot) {
+ search_key = bpos_successor(k.k->p);
+ continue;
+ }
+
+ pos.snapshot = iter->snapshot;
+
+ /*
+ * advance, same as on exit for iter->path, but only up
+ * to snapshot
+ */
+ __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = iter->path;
+
+ iter->update_path = bch2_btree_path_set_pos(trans,
+ iter->update_path, pos,
+ iter->flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
+ ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
+ if (unlikely(ret)) {
+ k = bkey_s_c_err(ret);
+ goto out_no_locked;
+ }
+ }
+
+ /*
+ * We can never have a key in a leaf node at POS_MAX, so
+ * we don't have to check these successor() calls:
+ */
+ if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ !bch2_snapshot_is_ancestor(trans->c,
+ iter->snapshot,
+ k.k->p.snapshot)) {
+ search_key = bpos_successor(k.k->p);
+ continue;
+ }
+
+ if (bkey_whiteout(k.k) &&
+ !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ search_key = bkey_successor(iter, k.k->p);
+ continue;
+ }
+
+ break;
+ }
+
+ iter->pos = iter_pos;
+
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ btree_path_set_should_be_locked(iter->path);
+out_no_locked:
+ if (iter->update_path) {
+ ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
+ if (unlikely(ret))
+ k = bkey_s_c_err(ret);
+ else
+ btree_path_set_should_be_locked(iter->update_path);
+ }
+
+ if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ iter->pos.snapshot = iter->snapshot;
+
+ ret = bch2_btree_iter_verify_ret(iter, k);
+ if (unlikely(ret)) {
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ }
+
+ bch2_btree_iter_verify_entry_exit(iter);
+
+ return k;
+end:
+ bch2_btree_iter_set_pos(iter, end);
+ k = bkey_s_c_null;
+ goto out_no_locked;
+}
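+
+/*
+ * Example usage - an illustrative sketch only, with process_key() standing in
+ * for caller code; real users normally go through the
+ * for_each_btree_key_upto() macros, which also handle transaction restarts:
+ *
+ *	for (k = bch2_btree_iter_peek_upto(&iter, end);
+ *	     k.k && !(ret = bkey_err(k));
+ *	     k = bch2_btree_iter_next(&iter))
+ *		process_key(k);
+ */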
+
+/**
+ * bch2_btree_iter_peek_all_levels() - returns the first key greater than or
+ * equal to iterator's current position, returning keys from every level of the
+ * btree. For keys at different levels of the btree that compare equal, the key
+ * from the lower level (leaf) is returned first.
+ * @iter: iterator to peek from
+ *
+ * Returns: key if found, or an error extractable with bkey_err().
+ */
+struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bkey_s_c k;
+ int ret;
+
+ EBUG_ON(iter->path->cached);
+ bch2_btree_iter_verify(iter);
+ BUG_ON(iter->path->level < iter->min_depth);
+ BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+ EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
+
+ while (1) {
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret)) {
+ /* ensure that iter->k is consistent with iter->pos: */
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ goto out_no_locked;
+ }
+
+ /* Already at end? */
+ if (!btree_path_node(iter->path, iter->path->level)) {
+ k = bkey_s_c_null;
+ goto out_no_locked;
+ }
+
+ k = btree_path_level_peek_all(trans->c,
+ &iter->path->l[iter->path->level], &iter->k);
+
+ /* Check if we should go up to the parent node: */
+ if (!k.k ||
+ (iter->advanced &&
+ bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
+ iter->pos = path_l(iter->path)->b->key.k.p;
+ btree_path_set_level_up(trans, iter->path);
+ iter->advanced = false;
+ continue;
+ }
+
+ /*
+ * Check if we should go back down to a leaf:
+ * If we're not in a leaf node, we only return the current key
+ * if it exactly matches iter->pos - otherwise we first have to
+ * go back to the leaf:
+ */
+ if (iter->path->level != iter->min_depth &&
+ (iter->advanced ||
+ !k.k ||
+ !bpos_eq(iter->pos, k.k->p))) {
+ btree_path_set_level_down(trans, iter->path, iter->min_depth);
+ iter->pos = bpos_successor(iter->pos);
+ iter->advanced = false;
+ continue;
+ }
+
+ /* Check if we should go to the next key: */
+ if (iter->path->level == iter->min_depth &&
+ iter->advanced &&
+ k.k &&
+ bpos_eq(iter->pos, k.k->p)) {
+ iter->pos = bpos_successor(iter->pos);
+ iter->advanced = false;
+ continue;
+ }
+
+ if (iter->advanced &&
+ iter->path->level == iter->min_depth &&
+ !bpos_eq(k.k->p, iter->pos))
+ iter->advanced = false;
+
+ BUG_ON(iter->advanced);
+ BUG_ON(!k.k);
+ break;
+ }
+
+ iter->pos = k.k->p;
+ btree_path_set_should_be_locked(iter->path);
+out_no_locked:
+ bch2_btree_iter_verify(iter);
+
+ return k;
+}
+
+/**
+ * bch2_btree_iter_next() - returns first key greater than iterator's current
+ * position
+ * @iter: iterator to peek from
+ *
+ * Returns: key if found, or an error extractable with bkey_err().
+ */
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+{
+ if (!bch2_btree_iter_advance(iter))
+ return bkey_s_c_null;
+
+ return bch2_btree_iter_peek(iter);
+}
+
+/**
+ * bch2_btree_iter_peek_prev() - returns first key less than or equal to
+ * iterator's current position
+ * @iter: iterator to peek from
+ *
+ * Returns: key if found, or an error extractable with bkey_err().
+ */
+struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key = iter->pos;
+ struct btree_path *saved_path = NULL;
+ struct bkey_s_c k;
+ struct bkey saved_k;
+ const struct bch_val *saved_v;
+ int ret;
+
+ EBUG_ON(iter->path->cached || iter->path->level);
+ EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
+
+ if (iter->flags & BTREE_ITER_WITH_JOURNAL)
+ return bkey_s_c_err(-EIO);
+
+ bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify_entry_exit(iter);
+
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ search_key.snapshot = U32_MAX;
+
+ while (1) {
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret)) {
+ /* ensure that iter->k is consistent with iter->pos: */
+ bch2_btree_iter_set_pos(iter, iter->pos);
+ k = bkey_s_c_err(ret);
+ goto out_no_locked;
+ }
+
+ k = btree_path_level_peek(trans, iter->path,
+ &iter->path->l[0], &iter->k);
+ if (!k.k ||
+ ((iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bpos_ge(bkey_start_pos(k.k), search_key)
+ : bpos_gt(k.k->p, search_key)))
+ k = btree_path_level_prev(trans, iter->path,
+ &iter->path->l[0], &iter->k);
+
+ if (likely(k.k)) {
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
+ if (k.k->p.snapshot == iter->snapshot)
+ goto got_key;
+
+ /*
+ * If we have a saved candidate, and we're no
+ * longer at the same _key_ (not pos), return
+ * that candidate
+ */
+ if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
+ bch2_path_put_nokeep(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->path = saved_path;
+ saved_path = NULL;
+ iter->k = saved_k;
+ k.v = saved_v;
+ goto got_key;
+ }
+
+ if (bch2_snapshot_is_ancestor(iter->trans->c,
+ iter->snapshot,
+ k.k->p.snapshot)) {
+ if (saved_path)
+ bch2_path_put_nokeep(trans, saved_path,
+ iter->flags & BTREE_ITER_INTENT);
+ saved_path = btree_path_clone(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
+ saved_k = *k.k;
+ saved_v = k.v;
+ }
+
+ search_key = bpos_predecessor(k.k->p);
+ continue;
+ }
+got_key:
+ if (bkey_whiteout(k.k) &&
+ !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ search_key = bkey_predecessor(iter, k.k->p);
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ search_key.snapshot = U32_MAX;
+ continue;
+ }
+
+ break;
+ } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
+ /* Advance to previous leaf node: */
+ search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
+ } else {
+ /* Start of btree: */
+ bch2_btree_iter_set_pos(iter, POS_MIN);
+ k = bkey_s_c_null;
+ goto out_no_locked;
+ }
+ }
+
+ EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
+
+ /* Extents can straddle iter->pos: */
+ if (bkey_lt(k.k->p, iter->pos))
+ iter->pos = k.k->p;
+
+ if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ iter->pos.snapshot = iter->snapshot;
+
+ btree_path_set_should_be_locked(iter->path);
+out_no_locked:
+ if (saved_path)
+ bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
+
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+
+ return k;
+}
+
+/**
+ * bch2_btree_iter_prev() - returns first key less than iterator's current
+ * position
+ * @iter: iterator to peek from
+ *
+ * Returns: key if found, or an error extractable with bkey_err().
+ */
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
+{
+ if (!bch2_btree_iter_rewind(iter))
+ return bkey_s_c_null;
+
+ return bch2_btree_iter_peek_prev(iter);
+}
+
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bpos search_key;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify_entry_exit(iter);
+ EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
+ EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
+
+ /* extents can't span inode numbers: */
+ if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
+ if (iter->pos.inode == KEY_INODE_MAX)
+ return bkey_s_c_null;
+
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ }
+
+ search_key = btree_iter_search_key(iter);
+ iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
+
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (unlikely(ret)) {
+ k = bkey_s_c_err(ret);
+ goto out_no_locked;
+ }
+
+ if ((iter->flags & BTREE_ITER_CACHED) ||
+ !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
+ struct bkey_i *next_update;
+
+ if ((next_update = btree_trans_peek_updates(iter)) &&
+ bpos_eq(next_update->k.p, iter->pos)) {
+ iter->k = next_update->k;
+ k = bkey_i_to_s_c(next_update);
+ goto out;
+ }
+
+ if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
+ (k = btree_trans_peek_slot_journal(trans, iter)).k)
+ goto out;
+
+ if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ if (!bkey_err(k))
+ iter->k = *k.k;
+ /* We're not returning a key from iter->path: */
+ goto out_no_locked;
+ }
+
+ k = bch2_btree_path_peek_slot(iter->path, &iter->k);
+ if (unlikely(!k.k))
+ goto out_no_locked;
+ } else {
+ struct bpos next;
+ struct bpos end = iter->pos;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ end.offset = U64_MAX;
+
+ EBUG_ON(iter->path->level);
+
+ if (iter->flags & BTREE_ITER_INTENT) {
+ struct btree_iter iter2;
+
+ bch2_trans_copy_iter(&iter2, iter);
+ k = bch2_btree_iter_peek_upto(&iter2, end);
+
+ if (k.k && !bkey_err(k)) {
+ iter->k = iter2.k;
+ k.k = &iter->k;
+ }
+ bch2_trans_iter_exit(trans, &iter2);
+ } else {
+ struct bpos pos = iter->pos;
+
+ k = bch2_btree_iter_peek_upto(iter, end);
+ if (unlikely(bkey_err(k)))
+ bch2_btree_iter_set_pos(iter, pos);
+ else
+ iter->pos = pos;
+ }
+
+ if (unlikely(bkey_err(k)))
+ goto out_no_locked;
+
+ next = k.k ? bkey_start_pos(k.k) : POS_MAX;
+
+ if (bkey_lt(iter->pos, next)) {
+ bkey_init(&iter->k);
+ iter->k.p = iter->pos;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+ bch2_key_resize(&iter->k,
+ min_t(u64, KEY_SIZE_MAX,
+ (next.inode == iter->pos.inode
+ ? next.offset
+ : KEY_OFFSET_MAX) -
+ iter->pos.offset));
+ EBUG_ON(!iter->k.size);
+ }
+
+ k = (struct bkey_s_c) { &iter->k, NULL };
+ }
+ }
+out:
+ btree_path_set_should_be_locked(iter->path);
+out_no_locked:
+ bch2_btree_iter_verify_entry_exit(iter);
+ bch2_btree_iter_verify(iter);
+ ret = bch2_btree_iter_verify_ret(iter, k);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ return k;
+}
+
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
+{
+ if (!bch2_btree_iter_advance(iter))
+ return bkey_s_c_null;
+
+ return bch2_btree_iter_peek_slot(iter);
+}
+
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
+{
+ if (!bch2_btree_iter_rewind(iter))
+ return bkey_s_c_null;
+
+ return bch2_btree_iter_peek_slot(iter);
+}
+
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
+{
+ struct bkey_s_c k;
+
+ while (btree_trans_too_many_iters(iter->trans) ||
+ (k = bch2_btree_iter_peek_type(iter, iter->flags),
+ bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
+ bch2_trans_begin(iter->trans);
+
+ return k;
+}
+
+/* new transactional stuff: */
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
+{
+ struct btree_path *path;
+ unsigned i;
+
+ BUG_ON(trans->nr_sorted != hweight64(trans->paths_allocated));
+
+ trans_for_each_path(trans, path) {
+ BUG_ON(path->sorted_idx >= trans->nr_sorted);
+ BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
+ }
+
+ for (i = 0; i < trans->nr_sorted; i++) {
+ unsigned idx = trans->sorted[i];
+
+ EBUG_ON(!(trans->paths_allocated & (1ULL << idx)));
+ BUG_ON(trans->paths[idx].sorted_idx != i);
+ }
+}
+
+static void btree_trans_verify_sorted(struct btree_trans *trans)
+{
+ struct btree_path *path, *prev = NULL;
+ unsigned i;
+
+ if (!bch2_debug_check_iterators)
+ return;
+
+ trans_for_each_path_inorder(trans, path, i) {
+ if (prev && btree_path_cmp(prev, path) > 0) {
+ __bch2_dump_trans_paths_updates(trans, true);
+ panic("trans paths out of order!\n");
+ }
+ prev = path;
+ }
+}
+#else
+static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
+static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
+#endif
+
+void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
+{
+ int i, l = 0, r = trans->nr_sorted, inc = 1;
+ bool swapped;
+
+ btree_trans_verify_sorted_refs(trans);
+
+ if (trans->paths_sorted)
+ goto out;
+
+ /*
+	 * Cocktail shaker sort: this is efficient because the list of paths
+	 * will usually already be mostly sorted.
+ */
+ do {
+ swapped = false;
+
+ for (i = inc > 0 ? l : r - 2;
+ i + 1 < r && i >= l;
+ i += inc) {
+ if (btree_path_cmp(trans->paths + trans->sorted[i],
+ trans->paths + trans->sorted[i + 1]) > 0) {
+ swap(trans->sorted[i], trans->sorted[i + 1]);
+ trans->paths[trans->sorted[i]].sorted_idx = i;
+ trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
+ swapped = true;
+ }
+ }
+
+ if (inc > 0)
+ --r;
+ else
+ l++;
+ inc = -inc;
+ } while (swapped);
+
+ trans->paths_sorted = true;
+out:
+ btree_trans_verify_sorted(trans);
+}
+
+static inline void btree_path_list_remove(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ unsigned i;
+
+ EBUG_ON(path->sorted_idx >= trans->nr_sorted);
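+	/*
+	 * trans->sorted[] is an array of u8 path indices; when unaligned
+	 * access is cheap we shift it around in u64-sized chunks, hence the
+	 * DIV_ROUND_UP(nr, 8):
+	 */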
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ trans->nr_sorted--;
+ memmove_u64s_down_small(trans->sorted + path->sorted_idx,
+ trans->sorted + path->sorted_idx + 1,
+ DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+#else
+ array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
+#endif
+ for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+ trans->paths[trans->sorted[i]].sorted_idx = i;
+
+ path->sorted_idx = U8_MAX;
+}
+
+static inline void btree_path_list_add(struct btree_trans *trans,
+ struct btree_path *pos,
+ struct btree_path *path)
+{
+ unsigned i;
+
+ path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
+ trans->sorted + path->sorted_idx,
+ DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
+ trans->nr_sorted++;
+ trans->sorted[path->sorted_idx] = path->idx;
+#else
+ array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
+#endif
+
+ for (i = path->sorted_idx; i < trans->nr_sorted; i++)
+ trans->paths[trans->sorted[i]].sorted_idx = i;
+
+ btree_trans_verify_sorted_refs(trans);
+}
+
+void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
+{
+ if (iter->update_path)
+ bch2_path_put_nokeep(trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ if (iter->path)
+ bch2_path_put(trans, iter->path,
+ iter->flags & BTREE_ITER_INTENT);
+ if (iter->key_cache_path)
+ bch2_path_put(trans, iter->key_cache_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->path = NULL;
+ iter->update_path = NULL;
+ iter->key_cache_path = NULL;
+}
+
+void bch2_trans_iter_init_outlined(struct btree_trans *trans,
+ struct btree_iter *iter,
+ enum btree_id btree_id, struct bpos pos,
+ unsigned flags)
+{
+ bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
+}
+
+void bch2_trans_node_iter_init(struct btree_trans *trans,
+ struct btree_iter *iter,
+ enum btree_id btree_id,
+ struct bpos pos,
+ unsigned locks_want,
+ unsigned depth,
+ unsigned flags)
+{
+ flags |= BTREE_ITER_NOT_EXTENTS;
+ flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+ flags |= BTREE_ITER_ALL_SNAPSHOTS;
+
+ bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
+ __bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
+
+ iter->min_depth = depth;
+
+ BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
+ BUG_ON(iter->path->level != depth);
+ BUG_ON(iter->min_depth != depth);
+}
+
+void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
+{
+ *dst = *src;
+ if (src->path)
+ __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
+ if (src->update_path)
+ __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
+ dst->key_cache_path = NULL;
+}
+
+void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
+{
+ unsigned new_top = trans->mem_top + size;
+ size_t old_bytes = trans->mem_bytes;
+ size_t new_bytes = roundup_pow_of_two(new_top);
+ int ret;
+ void *new_mem;
+ void *p;
+
+ trans->mem_max = max(trans->mem_max, new_top);
+
+ WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+
+ new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
+ if (unlikely(!new_mem)) {
+ bch2_trans_unlock(trans);
+
+ new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
+ if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+ new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+ new_bytes = BTREE_TRANS_MEM_MAX;
+ kfree(trans->mem);
+ }
+
+ if (!new_mem)
+ return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
+
+ trans->mem = new_mem;
+ trans->mem_bytes = new_bytes;
+
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ trans->mem = new_mem;
+ trans->mem_bytes = new_bytes;
+
+ if (old_bytes) {
+ trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
+ return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
+ }
+
+ p = trans->mem + trans->mem_top;
+ trans->mem_top += size;
+ memset(p, 0, size);
+ return p;
+}
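+
+/*
+ * Illustrative use, via the bch2_trans_kmalloc() wrapper: allocations are
+ * bump allocated out of trans->mem and live until the next
+ * bch2_trans_begin(); the returned pointer may be an ERR_PTR() encoding a
+ * transaction restart, which the caller must propagate:
+ *
+ *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
+ *	int ret = PTR_ERR_OR_ZERO(update);
+ *	if (ret)
+ *		return ret;
+ */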
+
+static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->cached && !btree_node_locked(path, 0))
+ path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
+
+ srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+ trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ trans->srcu_lock_time = jiffies;
+}
+
+/**
+ * bch2_trans_begin() - reset a transaction after an interrupted attempt
+ * @trans: transaction to reset
+ *
+ * Returns: current restart counter, to be used with trans_was_restarted()
+ *
+ * While iterating over nodes or updating nodes, an attempt to lock a btree
+ * node may return BCH_ERR_transaction_restart when the trylock fails. When
+ * this occurs, bch2_trans_begin() should be called and the transaction
+ * retried.
+ */
+u32 bch2_trans_begin(struct btree_trans *trans)
+{
+ struct btree_path *path;
+ u64 now;
+
+ bch2_trans_reset_updates(trans);
+
+ trans->restart_count++;
+ trans->mem_top = 0;
+
+ trans_for_each_path(trans, path) {
+ path->should_be_locked = false;
+
+ /*
+		 * If the transaction wasn't restarted, we're presumably doing
+		 * something new: don't keep iterators except the ones that are
+		 * in use - except for the subvolumes btree:
+ */
+ if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
+ path->preserve = false;
+
+ /*
+ * XXX: we probably shouldn't be doing this if the transaction
+ * was restarted, but currently we still overflow transaction
+ * iterators if we do that
+ */
+ if (!path->ref && !path->preserve)
+ __bch2_path_free(trans, path);
+ else
+ path->preserve = false;
+ }
+
+ now = local_clock();
+ if (!trans->restarted &&
+ (need_resched() ||
+ now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
+ drop_locks_do(trans, (cond_resched(), 0));
+ now = local_clock();
+ }
+ trans->last_begin_time = now;
+
+ if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
+ bch2_trans_reset_srcu_lock(trans);
+
+ trans->last_begin_ip = _RET_IP_;
+ if (trans->restarted) {
+ bch2_btree_path_traverse_all(trans);
+ trans->notrace_relock_fail = false;
+ }
+
+ return trans->restart_count;
+}
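+
+/*
+ * Sketch of the retry pattern bch2_trans_begin() enables - illustrative only,
+ * with do_something() standing in for caller code; the lockrestart_do() and
+ * commit_do() helpers wrap this pattern:
+ *
+ *	int ret;
+ *	do {
+ *		bch2_trans_begin(trans);
+ *		ret = do_something(trans);
+ *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
+ */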
+
+static struct btree_trans *bch2_trans_alloc(struct bch_fs *c)
+{
+ struct btree_trans *trans;
+
+ if (IS_ENABLED(__KERNEL__)) {
+ trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
+ if (trans)
+ return trans;
+ }
+
+ trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
+ /*
+	 * paths need to be zeroed, because bch2_check_for_deadlock() looks at
+	 * paths in other threads
+ */
+ memset(&trans->paths, 0, sizeof(trans->paths));
+ return trans;
+}
+
+const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
+
+unsigned bch2_trans_get_fn_idx(const char *fn)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
+ if (!bch2_btree_transaction_fns[i] ||
+ bch2_btree_transaction_fns[i] == fn) {
+ bch2_btree_transaction_fns[i] = fn;
+ return i;
+ }
+
+ pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
+ return i;
+}
+
+struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
+ __acquires(&c->btree_trans_barrier)
+{
+ struct btree_trans *trans;
+ struct btree_transaction_stats *s;
+
+ trans = bch2_trans_alloc(c);
+
+ memset(trans, 0, sizeof(*trans));
+ trans->c = c;
+ trans->fn = fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
+ ? bch2_btree_transaction_fns[fn_idx] : NULL;
+ trans->last_begin_time = local_clock();
+ trans->fn_idx = fn_idx;
+ trans->locking_wait.task = current;
+ trans->journal_replay_not_finished =
+ !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
+ closure_init_stack(&trans->ref);
+
+ s = btree_trans_stats(trans);
+ if (s && s->max_mem) {
+ unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
+
+ trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
+
+		if (unlikely(!trans->mem)) {
+ trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
+ trans->mem_bytes = BTREE_TRANS_MEM_MAX;
+ } else {
+ trans->mem_bytes = expected_mem_bytes;
+ }
+ }
+
+ if (s) {
+ trans->nr_max_paths = s->nr_max_paths;
+ trans->wb_updates_size = s->wb_updates_size;
+ }
+
+ trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ trans->srcu_lock_time = jiffies;
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ struct btree_trans *pos;
+
+ seqmutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(pos, &c->btree_trans_list, list) {
+ /*
+ * We'd much prefer to be stricter here and completely
+ * disallow multiple btree_trans in the same thread -
+ * but the data move path calls bch2_write when we
+ * already have a btree_trans initialized.
+ */
+ BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid &&
+ bch2_trans_locked(pos));
+
+ if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
+ list_add_tail(&trans->list, &pos->list);
+ goto list_add_done;
+ }
+ }
+ list_add_tail(&trans->list, &c->btree_trans_list);
+list_add_done:
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+
+ return trans;
+}
+
+static void check_btree_paths_leaked(struct btree_trans *trans)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->ref)
+ goto leaked;
+ return;
+leaked:
+ bch_err(c, "btree paths leaked from %s!", trans->fn);
+ trans_for_each_path(trans, path)
+ if (path->ref)
+ printk(KERN_ERR " btree %s %pS\n",
+ bch2_btree_ids[path->btree_id],
+ (void *) path->ip_allocated);
+ /* Be noisy about this: */
+ bch2_fatal_error(c);
+#endif
+}
+
+void bch2_trans_put(struct btree_trans *trans)
+ __releases(&c->btree_trans_barrier)
+{
+ struct btree_insert_entry *i;
+ struct bch_fs *c = trans->c;
+ struct btree_transaction_stats *s = btree_trans_stats(trans);
+
+ bch2_trans_unlock(trans);
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+ seqmutex_lock(&c->btree_trans_lock);
+ list_del(&trans->list);
+ seqmutex_unlock(&c->btree_trans_lock);
+ }
+
+ closure_sync(&trans->ref);
+
+ if (s)
+ s->max_mem = max(s->max_mem, trans->mem_max);
+
+ trans_for_each_update(trans, i)
+ __btree_path_put(i->path, true);
+ trans->nr_updates = 0;
+
+ check_btree_paths_leaked(trans);
+
+ srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+
+ bch2_journal_preres_put(&c->journal, &trans->journal_preres);
+
+ kfree(trans->extra_journal_entries.data);
+
+ if (trans->fs_usage_deltas) {
+ if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
+ REPLICAS_DELTA_LIST_MAX)
+ mempool_free(trans->fs_usage_deltas,
+ &c->replicas_delta_pool);
+ else
+ kfree(trans->fs_usage_deltas);
+ }
+
+ if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+ mempool_free(trans->mem, &c->btree_trans_mem_pool);
+ else
+ kfree(trans->mem);
+
+ /* Userspace doesn't have a real percpu implementation: */
+ if (IS_ENABLED(__KERNEL__))
+ trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
+ if (trans)
+ mempool_free(trans, &c->btree_trans_pool);
+}
+
+static void __maybe_unused
+bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
+ struct btree_bkey_cached_common *b)
+{
+ struct six_lock_count c = six_lock_counts(&b->lock);
+ struct task_struct *owner;
+ pid_t pid;
+
+ rcu_read_lock();
+ owner = READ_ONCE(b->lock.owner);
+ pid = owner ? owner->pid : 0;
+ rcu_read_unlock();
+
+ prt_tab(out);
+ prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
+ b->level, bch2_btree_ids[b->btree_id]);
+ bch2_bpos_to_text(out, btree_node_pos(b));
+
+ prt_tab(out);
+ prt_printf(out, " locks %u:%u:%u held by pid %u",
+ c.n[0], c.n[1], c.n[2], pid);
+}
+
+void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
+{
+ struct btree_path *path;
+ struct btree_bkey_cached_common *b;
+ static char lock_types[] = { 'r', 'i', 'w' };
+ unsigned l, idx;
+
+ if (!out->nr_tabstops) {
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 32);
+ }
+
+ prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);
+
+ trans_for_each_path_safe(trans, path, idx) {
+ if (!path->nodes_locked)
+ continue;
+
+ prt_printf(out, " path %u %c l=%u %s:",
+ path->idx,
+ path->cached ? 'c' : 'b',
+ path->level,
+ bch2_btree_ids[path->btree_id]);
+ bch2_bpos_to_text(out, path->pos);
+ prt_newline(out);
+
+ for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+ if (btree_node_locked(path, l) &&
+ !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
+ prt_printf(out, " %c l=%u ",
+ lock_types[btree_node_locked_type(path, l)], l);
+ bch2_btree_bkey_cached_common_to_text(out, b);
+ prt_newline(out);
+ }
+ }
+ }
+
+ b = READ_ONCE(trans->locking);
+ if (b) {
+ prt_printf(out, " blocked for %lluus on",
+ div_u64(local_clock() - trans->locking_wait.start_time,
+ 1000));
+ prt_newline(out);
+ prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
+ bch2_btree_bkey_cached_common_to_text(out, b);
+ prt_newline(out);
+ }
+}
+
+void bch2_fs_btree_iter_exit(struct bch_fs *c)
+{
+ struct btree_transaction_stats *s;
+ struct btree_trans *trans;
+ int cpu;
+
+ trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
+ if (trans)
+ panic("%s leaked btree_trans\n", trans->fn);
+
+ if (c->btree_trans_bufs)
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(c->btree_trans_bufs, cpu)->trans);
+ free_percpu(c->btree_trans_bufs);
+
+ for (s = c->btree_transaction_stats;
+ s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
+ s++) {
+ kfree(s->max_paths_text);
+ bch2_time_stats_exit(&s->lock_hold_times);
+ }
+
+ if (c->btree_trans_barrier_initialized)
+ cleanup_srcu_struct(&c->btree_trans_barrier);
+ mempool_exit(&c->btree_trans_mem_pool);
+ mempool_exit(&c->btree_trans_pool);
+}
+
+int bch2_fs_btree_iter_init(struct bch_fs *c)
+{
+ struct btree_transaction_stats *s;
+ int ret;
+
+ for (s = c->btree_transaction_stats;
+ s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
+ s++) {
+ bch2_time_stats_init(&s->lock_hold_times);
+ mutex_init(&s->lock);
+ }
+
+ INIT_LIST_HEAD(&c->btree_trans_list);
+ seqmutex_init(&c->btree_trans_lock);
+
+ c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
+ if (!c->btree_trans_bufs)
+ return -ENOMEM;
+
+ ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
+ sizeof(struct btree_trans)) ?:
+ mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
+ BTREE_TRANS_MEM_MAX) ?:
+ init_srcu_struct(&c->btree_trans_barrier);
+ if (!ret)
+ c->btree_trans_barrier_initialized = true;
+ return ret;
+}
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
new file mode 100644
index 000000000000..fbe273453db3
--- /dev/null
+++ b/fs/bcachefs/btree_iter.h
@@ -0,0 +1,939 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_ITER_H
+#define _BCACHEFS_BTREE_ITER_H
+
+#include "bset.h"
+#include "btree_types.h"
+#include "trace.h"
+
+static inline int __bkey_err(const struct bkey *k)
+{
+ return PTR_ERR_OR_ZERO(k);
+}
+
+#define bkey_err(_k) __bkey_err((_k).k)
+
+static inline void __btree_path_get(struct btree_path *path, bool intent)
+{
+ path->ref++;
+ path->intent_ref += intent;
+}
+
+static inline bool __btree_path_put(struct btree_path *path, bool intent)
+{
+ EBUG_ON(!path->ref);
+ EBUG_ON(!path->intent_ref && intent);
+ path->intent_ref -= intent;
+ return --path->ref == 0;
+}
+
+static inline void btree_path_set_dirty(struct btree_path *path,
+ enum btree_path_uptodate u)
+{
+ path->uptodate = max_t(unsigned, path->uptodate, u);
+}
+
+static inline struct btree *btree_path_node(struct btree_path *path,
+ unsigned level)
+{
+ return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
+}
+
+static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
+ const struct btree *b, unsigned level)
+{
+ return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
+}
+
+static inline struct btree *btree_node_parent(struct btree_path *path,
+ struct btree *b)
+{
+ return btree_path_node(path, b->c.level + 1);
+}
+
+/* Iterate over paths within a transaction: */
+
+void __bch2_btree_trans_sort_paths(struct btree_trans *);
+
+static inline void btree_trans_sort_paths(struct btree_trans *trans)
+{
+ if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+ trans->paths_sorted)
+ return;
+ __bch2_btree_trans_sort_paths(trans);
+}
+
+static inline struct btree_path *
+__trans_next_path(struct btree_trans *trans, unsigned idx)
+{
+ u64 l;
+
+ if (idx == BTREE_ITER_MAX)
+ return NULL;
+
+ l = trans->paths_allocated >> idx;
+ if (!l)
+ return NULL;
+
+ idx += __ffs64(l);
+ EBUG_ON(idx >= BTREE_ITER_MAX);
+ EBUG_ON(trans->paths[idx].idx != idx);
+ return &trans->paths[idx];
+}
+
+#define trans_for_each_path_from(_trans, _path, _start) \
+ for (_path = __trans_next_path((_trans), _start); \
+ (_path); \
+ _path = __trans_next_path((_trans), (_path)->idx + 1))
+
+#define trans_for_each_path(_trans, _path) \
+ trans_for_each_path_from(_trans, _path, 0)
+
+static inline struct btree_path *
+__trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
+{
+ u64 l;
+
+ if (*idx == BTREE_ITER_MAX)
+ return NULL;
+
+ l = trans->paths_allocated >> *idx;
+ if (!l)
+ return NULL;
+
+ *idx += __ffs64(l);
+ EBUG_ON(*idx >= BTREE_ITER_MAX);
+ return &trans->paths[*idx];
+}
+
+/*
+ * This version is intended to be safe for use on a btree_trans that is owned
+ * by another thread, for bch2_btree_trans_to_text():
+ */
+#define trans_for_each_path_safe_from(_trans, _path, _idx, _start) \
+ for (_idx = _start; \
+ (_path = __trans_next_path_safe((_trans), &_idx)); \
+ _idx++)
+
+#define trans_for_each_path_safe(_trans, _path, _idx) \
+ trans_for_each_path_safe_from(_trans, _path, _idx, 0)
+
+static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
+{
+ unsigned idx = path ? path->sorted_idx + 1 : 0;
+
+ EBUG_ON(idx > trans->nr_sorted);
+
+ return idx < trans->nr_sorted
+ ? trans->paths + trans->sorted[idx]
+ : NULL;
+}
+
+static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
+{
+ unsigned idx = path ? path->sorted_idx : trans->nr_sorted;
+
+ return idx
+ ? trans->paths + trans->sorted[idx - 1]
+ : NULL;
+}
+
+#define trans_for_each_path_inorder(_trans, _path, _i) \
+ for (_i = 0; \
+	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) < (_trans)->nr_sorted;\
+ _i++)
+
+#define trans_for_each_path_inorder_reverse(_trans, _path, _i) \
+	for (_i = (_trans)->nr_sorted - 1;				\
+	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
+ --_i)
+
+static inline bool __path_has_node(const struct btree_path *path,
+ const struct btree *b)
+{
+ return path->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(path, b, b->c.level);
+}
+
+static inline struct btree_path *
+__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
+ unsigned idx)
+{
+ struct btree_path *path = __trans_next_path(trans, idx);
+
+ while (path && !__path_has_node(path, b))
+ path = __trans_next_path(trans, path->idx + 1);
+
+ return path;
+}
+
+#define trans_for_each_path_with_node(_trans, _b, _path) \
+ for (_path = __trans_next_path_with_node((_trans), (_b), 0); \
+ (_path); \
+ _path = __trans_next_path_with_node((_trans), (_b), \
+ (_path)->idx + 1))
+
+struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
+ bool, unsigned long);
+
+static inline struct btree_path * __must_check
+bch2_btree_path_make_mut(struct btree_trans *trans,
+ struct btree_path *path, bool intent,
+ unsigned long ip)
+{
+ if (path->ref > 1 || path->preserve)
+ path = __bch2_btree_path_make_mut(trans, path, intent, ip);
+ path->should_be_locked = false;
+ return path;
+}
+
+struct btree_path * __must_check
+__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
+ struct bpos, bool, unsigned long, int);
+
+static inline struct btree_path * __must_check
+bch2_btree_path_set_pos(struct btree_trans *trans,
+ struct btree_path *path, struct bpos new_pos,
+ bool intent, unsigned long ip)
+{
+ int cmp = bpos_cmp(new_pos, path->pos);
+
+ return cmp
+ ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
+ : path;
+}
+
+int __must_check bch2_btree_path_traverse_one(struct btree_trans *, struct btree_path *,
+ unsigned, unsigned long);
+
+static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
+ struct btree_path *path, unsigned flags)
+{
+ if (path->uptodate < BTREE_ITER_NEED_RELOCK)
+ return 0;
+
+ return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
+}
+
+struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
+ unsigned, unsigned, unsigned, unsigned long);
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
+
+/*
+ * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
+ * different snapshot:
+ */
+static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
+{
+ struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);
+
+ if (k.k && bpos_eq(path->pos, k.k->p))
+ return k;
+
+ bkey_init(u);
+ u->p = path->pos;
+ return (struct bkey_s_c) { u, NULL };
+}
+
+struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
+ struct btree_iter *, struct bpos);
+
+void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
+
+int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);
+
+static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
+{
+ return mutex_trylock(lock)
+ ? 0
+ : __bch2_trans_mutex_lock(trans, lock);
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_trans_verify_paths(struct btree_trans *);
+void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
+ struct bpos, bool);
+#else
+static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
+static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
+ struct bpos pos, bool key_cache) {}
+#endif
+
+void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
+ struct btree *, struct bkey_packed *);
+void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
+ struct btree *, struct btree_node_iter *,
+ struct bkey_packed *, unsigned, unsigned);
+
+int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);
+
+void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
+
+int bch2_trans_relock(struct btree_trans *);
+int bch2_trans_relock_notrace(struct btree_trans *);
+void bch2_trans_unlock(struct btree_trans *);
+bool bch2_trans_locked(struct btree_trans *);
+
+static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
+{
+ return restart_count != trans->restart_count
+ ? -BCH_ERR_transaction_restart_nested
+ : 0;
+}
+
+void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);
+
+static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
+ u32 restart_count)
+{
+ if (trans_was_restarted(trans, restart_count))
+ bch2_trans_restart_error(trans, restart_count);
+}
+
+void __noreturn bch2_trans_in_restart_error(struct btree_trans *);
+
+static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
+{
+ if (trans->restarted)
+ bch2_trans_in_restart_error(trans);
+}
+
+__always_inline
+static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
+{
+ BUG_ON(err <= 0);
+ BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
+
+ trans->restarted = err;
+ trans->last_restarted_ip = _THIS_IP_;
+ return -err;
+}
+
+__always_inline
+static int btree_trans_restart(struct btree_trans *trans, int err)
+{
+ btree_trans_restart_nounlock(trans, err);
+ return -err;
+}
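+
+/*
+ * Illustrative sketch of the restart protocol (bch2_do_thing() is a made-up
+ * stand-in, not a real helper): code that can't make forward progress flags
+ * the restart and returns the error up the stack, and the outermost retry
+ * loop - typically lockrestart_do(), defined later in this header - calls
+ * bch2_trans_begin() and tries again:
+ *
+ *	static int bch2_do_thing(struct btree_trans *trans, bool stuck)
+ *	{
+ *		if (stuck)
+ *			return btree_trans_restart(trans,
+ *				BCH_ERR_transaction_restart_nested);
+ *		return 0;
+ *	}
+ */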
+
+bool bch2_btree_node_upgrade(struct btree_trans *,
+ struct btree_path *, unsigned);
+
+void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);
+
+static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ unsigned new_locks_want = path->level + !!path->intent_ref;
+
+ if (path->locks_want > new_locks_want)
+ __bch2_btree_path_downgrade(trans, path, new_locks_want);
+}
+
+void bch2_trans_downgrade(struct btree_trans *);
+
+void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
+void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
+
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
+int __must_check bch2_btree_iter_traverse(struct btree_iter *);
+
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
+struct btree *bch2_btree_iter_next_node(struct btree_iter *);
+
+struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+
+struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *);
+
+static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+{
+ return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
+}
+
+struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
+
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
+
+bool bch2_btree_iter_advance(struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_iter *);
+
+static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+{
+ iter->k.type = KEY_TYPE_deleted;
+ iter->k.p.inode = iter->pos.inode = new_pos.inode;
+ iter->k.p.offset = iter->pos.offset = new_pos.offset;
+ iter->k.p.snapshot = iter->pos.snapshot = new_pos.snapshot;
+ iter->k.size = 0;
+}
+
+static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+{
+ if (unlikely(iter->update_path))
+ bch2_path_put(iter->trans, iter->update_path,
+ iter->flags & BTREE_ITER_INTENT);
+ iter->update_path = NULL;
+
+ if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ new_pos.snapshot = iter->snapshot;
+
+ __bch2_btree_iter_set_pos(iter, new_pos);
+}
+
+static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
+{
+ BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
+ iter->pos = bkey_start_pos(&iter->k);
+}
+
+static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
+{
+ struct bpos pos = iter->pos;
+
+ iter->snapshot = snapshot;
+ pos.snapshot = snapshot;
+ bch2_btree_iter_set_pos(iter, pos);
+}
+
+void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
+
+static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
+ unsigned btree_id,
+ unsigned flags)
+{
+ if (flags & BTREE_ITER_ALL_LEVELS)
+ flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
+
+ if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
+ btree_node_type_is_extents(btree_id))
+ flags |= BTREE_ITER_IS_EXTENTS;
+
+ if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
+ !btree_type_has_snapshots(btree_id))
+ flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+
+ if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ btree_type_has_snapshots(btree_id))
+ flags |= BTREE_ITER_FILTER_SNAPSHOTS;
+
+ if (trans->journal_replay_not_finished)
+ flags |= BTREE_ITER_WITH_JOURNAL;
+
+ return flags;
+}
+
+static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
+ unsigned btree_id,
+ unsigned flags)
+{
+ if (!btree_id_cached(trans->c, btree_id)) {
+ flags &= ~BTREE_ITER_CACHED;
+ flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+ } else if (!(flags & BTREE_ITER_CACHED))
+ flags |= BTREE_ITER_WITH_KEY_CACHE;
+
+ return __bch2_btree_iter_flags(trans, btree_id, flags);
+}
+
+static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned locks_want,
+ unsigned depth,
+ unsigned flags,
+ unsigned long ip)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->trans = trans;
+ iter->btree_id = btree_id;
+ iter->flags = flags;
+ iter->snapshot = pos.snapshot;
+ iter->pos = pos;
+ iter->k.p = pos;
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ iter->ip_allocated = ip;
+#endif
+ iter->path = bch2_path_get(trans, btree_id, iter->pos,
+ locks_want, depth, flags, ip);
+}
+
+void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
+ enum btree_id, struct bpos, unsigned);
+
+static inline void bch2_trans_iter_init(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags)
+{
+ if (__builtin_constant_p(btree_id) &&
+ __builtin_constant_p(flags))
+ bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _THIS_IP_);
+ else
+ bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
+}
+
+void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
+ enum btree_id, struct bpos,
+ unsigned, unsigned, unsigned);
+void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
+
+static inline void set_btree_iter_dontneed(struct btree_iter *iter)
+{
+ if (!iter->trans->restarted)
+ iter->path->preserve = false;
+}
+
+void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
+
+static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
+{
+ size = roundup(size, 8);
+
+ if (likely(trans->mem_top + size <= trans->mem_bytes)) {
+ void *p = trans->mem + trans->mem_top;
+
+ trans->mem_top += size;
+ memset(p, 0, size);
+ return p;
+ } else {
+ return __bch2_trans_kmalloc(trans, size);
+ }
+}
+
+static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
+{
+ size = roundup(size, 8);
+
+ if (likely(trans->mem_top + size <= trans->mem_bytes)) {
+ void *p = trans->mem + trans->mem_top;
+
+ trans->mem_top += size;
+ return p;
+ } else {
+ return __bch2_trans_kmalloc(trans, size);
+ }
+}
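+
+/*
+ * Both helpers above are bump allocators over trans->mem; allocations aren't
+ * freed individually, the arena is reused once the transaction is restarted
+ * or freed. Typical (illustrative) usage, allocating a key in transaction
+ * memory:
+ *
+ *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
+ *	int ret = PTR_ERR_OR_ZERO(k);
+ *	if (ret)
+ *		return ret;
+ */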
+
+static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags, unsigned type)
+{
+ struct bkey_s_c k;
+
+ bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
+ k = bch2_btree_iter_peek_slot(iter);
+
+ if (!bkey_err(k) && type && k.k->type != type)
+ k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
+ if (unlikely(bkey_err(k)))
+ bch2_trans_iter_exit(trans, iter);
+ return k;
+}
+
+static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags)
+{
+ return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
+}
+
+#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
+ bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter, \
+ _btree_id, _pos, _flags, KEY_TYPE_##_type))
+
+static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags, unsigned type,
+ unsigned val_size, void *val)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
+ ret = bkey_err(k);
+ if (!ret) {
+ unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);
+
+ memcpy(val, k.v, b);
+		if (unlikely(b < val_size))
+			memset((void *) val + b, 0, val_size - b);
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ return ret;
+}
+
+#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
+ __bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, \
+ KEY_TYPE_##_type, sizeof(*_val), _val)
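+
+/*
+ * Example (illustrative; assumes the snapshots btree and its key type defined
+ * elsewhere in bcachefs): read a fixed-size value directly into a struct,
+ * with no iterator left over to clean up:
+ *
+ *	struct bch_snapshot s;
+ *	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
+ *					  POS(0, id), 0, snapshot, &s);
+ */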
+
+u32 bch2_trans_begin(struct btree_trans *);
+
+/*
+ * XXX
+ * this does not handle transaction restarts from bch2_btree_iter_next_node()
+ * correctly
+ */
+#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ _locks_want, _depth, _flags, _b, _ret) \
+ for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
+ _start, _locks_want, _depth, _flags); \
+ (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)), \
+ !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b); \
+ (_b) = bch2_btree_iter_next_node(&(_iter)))
+
+#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ _flags, _b, _ret) \
+ __for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ 0, 0, _flags, _b, _ret)
+
+static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
+ unsigned flags)
+{
+ BUG_ON(flags & BTREE_ITER_ALL_LEVELS);
+
+ return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
+ bch2_btree_iter_peek_prev(iter);
+}
+
+static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
+ unsigned flags)
+{
+ return flags & BTREE_ITER_ALL_LEVELS ? bch2_btree_iter_peek_all_levels(iter) :
+ flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
+ bch2_btree_iter_peek(iter);
+}
+
+static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
+ struct bpos end,
+ unsigned flags)
+{
+ if (!(flags & BTREE_ITER_SLOTS))
+ return bch2_btree_iter_peek_upto(iter, end);
+
+ if (bkey_gt(iter->pos, end))
+ return bkey_s_c_null;
+
+ return bch2_btree_iter_peek_slot(iter);
+}
+
+static inline int btree_trans_too_many_iters(struct btree_trans *trans)
+{
+ if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
+ trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
+ }
+
+ return 0;
+}
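+
+/*
+ * The headroom of 8 above is a heuristic: we restart before the path bitmap
+ * fills up completely, so that paths allocated internally (e.g. by the
+ * transaction commit path) still have room.
+ */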
+
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
+
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned flags)
+{
+ struct bkey_s_c k;
+
+ while (btree_trans_too_many_iters(trans) ||
+ (k = bch2_btree_iter_peek_type(iter, flags),
+ bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
+ bch2_trans_begin(trans);
+
+ return k;
+}
+
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end,
+ unsigned flags)
+{
+ struct bkey_s_c k;
+
+ while (btree_trans_too_many_iters(trans) ||
+ (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
+ bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
+ bch2_trans_begin(trans);
+
+ return k;
+}
+
+#define lockrestart_do(_trans, _do) \
+({ \
+ u32 _restart_count; \
+ int _ret2; \
+ \
+ do { \
+ _restart_count = bch2_trans_begin(_trans); \
+ _ret2 = (_do); \
+ } while (bch2_err_matches(_ret2, BCH_ERR_transaction_restart)); \
+ \
+ if (!_ret2) \
+ bch2_trans_verify_not_restarted(_trans, _restart_count);\
+ \
+ _ret2; \
+})
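+
+/*
+ * Typical usage (sketch, reusing the made-up bch2_do_thing() from above):
+ * run a transactional operation to completion, retrying from
+ * bch2_trans_begin() on transaction restart:
+ *
+ *	ret = lockrestart_do(trans, bch2_do_thing(trans, false));
+ */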
+
+/*
+ * nested_lockrestart_do(), nested_commit_do():
+ *
+ * These are like lockrestart_do() and commit_do(), with two differences:
+ *
+ * - We don't call bch2_trans_begin() unless we had a transaction restart
+ * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
+ * transaction restart
+ */
+#define nested_lockrestart_do(_trans, _do) \
+({ \
+ u32 _restart_count, _orig_restart_count; \
+ int _ret2; \
+ \
+ _restart_count = _orig_restart_count = (_trans)->restart_count; \
+ \
+ while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
+ _restart_count = bch2_trans_begin(_trans); \
+ \
+ if (!_ret2) \
+ bch2_trans_verify_not_restarted(_trans, _restart_count);\
+ \
+ _ret2 ?: trans_was_restarted(_trans, _restart_count); \
+})
+
+#define for_each_btree_key2(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _do) \
+({ \
+ int _ret3 = 0; \
+ \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ while (1) { \
+ u32 _restart_count = bch2_trans_begin(_trans); \
+ \
+ _ret3 = 0; \
+ (_k) = bch2_btree_iter_peek_type(&(_iter), (_flags)); \
+ if (!(_k).k) \
+ break; \
+ \
+ _ret3 = bkey_err(_k) ?: (_do); \
+ if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
+ continue; \
+ if (_ret3) \
+ break; \
+ bch2_trans_verify_not_restarted(_trans, _restart_count);\
+ if (!bch2_btree_iter_advance(&(_iter))) \
+ break; \
+ } \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret3; \
+})
+
+#define for_each_btree_key2_upto(_trans, _iter, _btree_id, \
+ _start, _end, _flags, _k, _do) \
+({ \
+ int _ret3 = 0; \
+ \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ while (1) { \
+ u32 _restart_count = bch2_trans_begin(_trans); \
+ \
+ _ret3 = 0; \
+ (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
+ if (!(_k).k) \
+ break; \
+ \
+ _ret3 = bkey_err(_k) ?: (_do); \
+ if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
+ continue; \
+ if (_ret3) \
+ break; \
+ bch2_trans_verify_not_restarted(_trans, _restart_count);\
+ if (!bch2_btree_iter_advance(&(_iter))) \
+ break; \
+ } \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret3; \
+})
+
+#define for_each_btree_key_reverse(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _do) \
+({ \
+ int _ret3 = 0; \
+ \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ while (1) { \
+ u32 _restart_count = bch2_trans_begin(_trans); \
+ (_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
+ if (!(_k).k) { \
+ _ret3 = 0; \
+ break; \
+ } \
+ \
+ _ret3 = bkey_err(_k) ?: (_do); \
+ if (bch2_err_matches(_ret3, BCH_ERR_transaction_restart))\
+ continue; \
+ if (_ret3) \
+ break; \
+ bch2_trans_verify_not_restarted(_trans, _restart_count);\
+ if (!bch2_btree_iter_rewind(&(_iter))) \
+ break; \
+ } \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret3; \
+})
+
+#define for_each_btree_key_commit(_trans, _iter, _btree_id, \
+ _start, _iter_flags, _k, \
+ _disk_res, _journal_seq, _commit_flags,\
+ _do) \
+ for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
+ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+ (_journal_seq), (_commit_flags)))
+
+#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id, \
+ _start, _iter_flags, _k, \
+ _disk_res, _journal_seq, _commit_flags,\
+ _do) \
+ for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
+ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+ (_journal_seq), (_commit_flags)))
+
+#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id, \
+ _start, _end, _iter_flags, _k, \
+ _disk_res, _journal_seq, _commit_flags,\
+ _do) \
+ for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
+ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+ (_journal_seq), (_commit_flags)))
+
+#define for_each_btree_key(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
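+
+/*
+ * Example (illustrative; process_key() is a made-up callback): walk every
+ * extent, transparently retrying on transaction restart. The macro
+ * initializes the iterator, but the caller must still exit it:
+ *
+ *	struct btree_iter iter;
+ *	struct bkey_s_c k;
+ *	int ret = 0;
+ *
+ *	for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN,
+ *			   BTREE_ITER_PREFETCH, k, ret) {
+ *		if (process_key(trans->c, k))
+ *			break;
+ *	}
+ *	bch2_trans_iter_exit(trans, &iter);
+ */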
+
+#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
+ _start, _end, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans), \
+ &(_iter), _end, _flags),\
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
+#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = bch2_btree_iter_peek_type(&(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
+#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \
+ _start, _end, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
+#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret) \
+ for (; \
+ (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
+#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
+ for (; \
+ (_k) = bch2_btree_iter_peek_type(&(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
+#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
+ for (; \
+ (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_advance(&(_iter)))
+
+#define drop_locks_do(_trans, _do) \
+({ \
+ bch2_trans_unlock(_trans); \
+ _do ?: bch2_trans_relock(_trans); \
+})
+
+#define allocate_dropping_locks_errcode(_trans, _do) \
+({ \
+ gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN; \
+ int _ret = _do; \
+ \
+ if (bch2_err_matches(_ret, ENOMEM)) { \
+ _gfp = GFP_KERNEL; \
+		_ret = drop_locks_do(_trans, _do);			\
+ } \
+ _ret; \
+})
+
+#define allocate_dropping_locks(_trans, _ret, _do) \
+({ \
+ gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN; \
+ typeof(_do) _p = _do; \
+ \
+ _ret = 0; \
+ if (unlikely(!_p)) { \
+ _gfp = GFP_KERNEL; \
+		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
+ } \
+ _p; \
+})
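+
+/*
+ * Note for both macros above: the _do expression is expected to reference the
+ * macro-local variable _gfp, and may be evaluated twice - first with
+ * GFP_NOWAIT while locks are held, then with GFP_KERNEL after dropping them.
+ * For example (from the key cache code):
+ *
+ *	ck = allocate_dropping_locks(trans, ret,
+ *			kmem_cache_zalloc(bch2_key_cache, _gfp));
+ */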
+
+/* new multiple iterator interface: */
+
+void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
+void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
+void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
+void bch2_dump_trans_updates(struct btree_trans *);
+void bch2_dump_trans_paths_updates(struct btree_trans *);
+
+struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
+void bch2_trans_put(struct btree_trans *);
+
+extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
+unsigned bch2_trans_get_fn_idx(const char *);
+
+#define bch2_trans_get(_c) \
+({ \
+ static unsigned trans_fn_idx; \
+ \
+ if (unlikely(!trans_fn_idx)) \
+ trans_fn_idx = bch2_trans_get_fn_idx(__func__); \
+ __bch2_trans_get(_c, trans_fn_idx); \
+})
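+
+/*
+ * Lifecycle sketch (illustrative, reusing the made-up bch2_do_thing()): the
+ * static trans_fn_idx caches a per-callsite index into
+ * bch2_btree_transaction_fns, used for transaction statistics:
+ *
+ *	struct btree_trans *trans = bch2_trans_get(c);
+ *	int ret = lockrestart_do(trans, bch2_do_thing(trans, false));
+ *	bch2_trans_put(trans);
+ */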
+
+void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);
+
+void bch2_fs_btree_iter_exit(struct bch_fs *);
+int bch2_fs_btree_iter_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_BTREE_ITER_H */
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
new file mode 100644
index 000000000000..58a981bcf3aa
--- /dev/null
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bset.h"
+#include "btree_journal_iter.h"
+#include "journal_io.h"
+
+#include <linux/sort.h>
+
+/*
+ * For managing keys we read from the journal: until journal replay has
+ * finished, normal btree lookups need to be able to find and return keys from
+ * the journal where they overwrite what's in the btree, so we have a special
+ * iterator and operations for the regular btree iter code to use:
+ */
+
+static int __journal_key_cmp(enum btree_id l_btree_id,
+ unsigned l_level,
+ struct bpos l_pos,
+ const struct journal_key *r)
+{
+ return (cmp_int(l_btree_id, r->btree_id) ?:
+ cmp_int(l_level, r->level) ?:
+ bpos_cmp(l_pos, r->k->k.p));
+}
+
+static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
+{
+ return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
+}
+
+static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
+{
+ size_t gap_size = keys->size - keys->nr;
+
+ if (idx >= keys->gap)
+ idx += gap_size;
+ return idx;
+}
+
+static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
+{
+ return keys->d + idx_to_pos(keys, idx);
+}
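+
+/*
+ * journal_keys is kept as a gap buffer: live entries occupy d[0..gap) and
+ * d[gap + (size - nr)..size), with the hole at the most recent insertion
+ * point. E.g. with size 8, nr 5, gap 2, logical index 3 maps to physical
+ * position 6.
+ */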
+
+static size_t __bch2_journal_key_search(struct journal_keys *keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ size_t l = 0, r = keys->nr, m;
+
+ while (l < r) {
+ m = l + ((r - l) >> 1);
+ if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
+ l = m + 1;
+ else
+ r = m;
+ }
+
+ BUG_ON(l < keys->nr &&
+ __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);
+
+ BUG_ON(l &&
+ __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
+
+ return l;
+}
+
+static size_t bch2_journal_key_search(struct journal_keys *keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
+}
+
+struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
+ unsigned level, struct bpos pos,
+ struct bpos end_pos, size_t *idx)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ unsigned iters = 0;
+ struct journal_key *k;
+search:
+ if (!*idx)
+ *idx = __bch2_journal_key_search(keys, btree_id, level, pos);
+
+ while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
+ if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
+ return NULL;
+
+ if (__journal_key_cmp(btree_id, level, pos, k) <= 0 &&
+ !k->overwritten)
+ return k->k;
+
+ (*idx)++;
+ iters++;
+ if (iters == 10) {
+ *idx = 0;
+ goto search;
+ }
+ }
+
+ return NULL;
+}
+
+struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
+ unsigned level, struct bpos pos)
+{
+ size_t idx = 0;
+
+ return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
+}
+
+static void journal_iters_fix(struct bch_fs *c)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ /* The key we just inserted is immediately before the gap: */
+ size_t gap_end = keys->gap + (keys->size - keys->nr);
+ struct btree_and_journal_iter *iter;
+
+ /*
+ * If an iterator points one after the key we just inserted, decrement
+ * the iterator so it points at the key we just inserted - if the
+ * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
+ * handle that:
+ */
+ list_for_each_entry(iter, &c->journal_iters, journal.list)
+ if (iter->journal.idx == gap_end)
+ iter->journal.idx = keys->gap - 1;
+}
+
+static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ struct journal_iter *iter;
+ size_t gap_size = keys->size - keys->nr;
+
+ list_for_each_entry(iter, &c->journal_iters, list) {
+ if (iter->idx > old_gap)
+ iter->idx -= gap_size;
+ if (iter->idx >= new_gap)
+ iter->idx += gap_size;
+ }
+}
+
+int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_i *k)
+{
+ struct journal_key n = {
+ .btree_id = id,
+ .level = level,
+ .k = k,
+ .allocated = true,
+ /*
+ * Ensure these keys are done last by journal replay, to unblock
+ * journal reclaim:
+ */
+ .journal_seq = U32_MAX,
+ };
+ struct journal_keys *keys = &c->journal_keys;
+ size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
+
+ BUG_ON(test_bit(BCH_FS_RW, &c->flags));
+
+ if (idx < keys->size &&
+ journal_key_cmp(&n, &keys->d[idx]) == 0) {
+ if (keys->d[idx].allocated)
+ kfree(keys->d[idx].k);
+ keys->d[idx] = n;
+ return 0;
+ }
+
+ if (idx > keys->gap)
+ idx -= keys->size - keys->nr;
+
+ if (keys->nr == keys->size) {
+ struct journal_keys new_keys = {
+ .nr = keys->nr,
+ .size = max_t(size_t, keys->size, 8) * 2,
+ };
+
+ new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
+ if (!new_keys.d) {
+ bch_err(c, "%s: error allocating new key array (size %zu)",
+ __func__, new_keys.size);
+ return -BCH_ERR_ENOMEM_journal_key_insert;
+ }
+
+ /* Since @keys was full, there was no gap: */
+ memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
+ kvfree(keys->d);
+ *keys = new_keys;
+
+ /* And now the gap is at the end: */
+ keys->gap = keys->nr;
+ }
+
+ journal_iters_move_gap(c, keys->gap, idx);
+
+ move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
+ keys->gap = idx;
+
+ keys->nr++;
+ keys->d[keys->gap++] = n;
+
+ journal_iters_fix(c);
+
+ return 0;
+}
+
+/*
+ * Can only be used from the recovery thread while we're still RO - can't be
+ * used once we've gone RW, as journal_keys is at that point used by multiple
+ * threads:
+ */
+int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_i *k)
+{
+ struct bkey_i *n;
+ int ret;
+
+ n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
+ if (!n)
+ return -BCH_ERR_ENOMEM_journal_key_insert;
+
+ bkey_copy(n, k);
+ ret = bch2_journal_key_insert_take(c, id, level, n);
+ if (ret)
+ kfree(n);
+ return ret;
+}
+
+int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bpos pos)
+{
+ struct bkey_i whiteout;
+
+ bkey_init(&whiteout.k);
+ whiteout.k.p = pos;
+
+ return bch2_journal_key_insert(c, id, level, &whiteout);
+}
+
+void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
+ unsigned level, struct bpos pos)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ size_t idx = bch2_journal_key_search(keys, btree, level, pos);
+
+ if (idx < keys->size &&
+ keys->d[idx].btree_id == btree &&
+ keys->d[idx].level == level &&
+ bpos_eq(keys->d[idx].k->k.p, pos))
+ keys->d[idx].overwritten = true;
+}
+
+static void bch2_journal_iter_advance(struct journal_iter *iter)
+{
+ if (iter->idx < iter->keys->size) {
+ iter->idx++;
+ if (iter->idx == iter->keys->gap)
+ iter->idx += iter->keys->size - iter->keys->nr;
+ }
+}
+
+static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
+{
+ struct journal_key *k = iter->keys->d + iter->idx;
+
+ while (k < iter->keys->d + iter->keys->size &&
+ k->btree_id == iter->btree_id &&
+ k->level == iter->level) {
+ if (!k->overwritten)
+ return bkey_i_to_s_c(k->k);
+
+ bch2_journal_iter_advance(iter);
+ k = iter->keys->d + iter->idx;
+ }
+
+ return bkey_s_c_null;
+}
+
+static void bch2_journal_iter_exit(struct journal_iter *iter)
+{
+ list_del(&iter->list);
+}
+
+static void bch2_journal_iter_init(struct bch_fs *c,
+ struct journal_iter *iter,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ iter->btree_id = id;
+ iter->level = level;
+ iter->keys = &c->journal_keys;
+ iter->idx = bch2_journal_key_search(&c->journal_keys, id, level, pos);
+}
+
+static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
+{
+ return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
+ iter->b, &iter->unpacked);
+}
+
+static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
+{
+ bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
+}
+
+void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
+{
+ if (bpos_eq(iter->pos, SPOS_MAX))
+ iter->at_end = true;
+ else
+ iter->pos = bpos_successor(iter->pos);
+}
+
+struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
+{
+ struct bkey_s_c btree_k, journal_k, ret;
+again:
+ if (iter->at_end)
+ return bkey_s_c_null;
+
+ while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
+ bpos_lt(btree_k.k->p, iter->pos))
+ bch2_journal_iter_advance_btree(iter);
+
+ while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
+ bpos_lt(journal_k.k->p, iter->pos))
+ bch2_journal_iter_advance(&iter->journal);
+
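+	/*
+	 * If both iterators have a key at the same position, the journal key
+	 * takes precedence: it's newer than what's in the btree.
+	 */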
+ ret = journal_k.k &&
+ (!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
+ ? journal_k
+ : btree_k;
+
+ if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
+ ret = bkey_s_c_null;
+
+ if (ret.k) {
+ iter->pos = ret.k->p;
+ if (bkey_deleted(ret.k)) {
+ bch2_btree_and_journal_iter_advance(iter);
+ goto again;
+ }
+ } else {
+ iter->pos = SPOS_MAX;
+ iter->at_end = true;
+ }
+
+ return ret;
+}
+
+void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
+{
+ bch2_journal_iter_exit(&iter->journal);
+}
+
+void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
+ struct bch_fs *c,
+ struct btree *b,
+ struct btree_node_iter node_iter,
+ struct bpos pos)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ iter->b = b;
+ iter->node_iter = node_iter;
+ bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
+ INIT_LIST_HEAD(&iter->journal.list);
+ iter->pos = b->data->min_key;
+ iter->at_end = false;
+}
+
+/*
+ * This version is used by btree_gc before the filesystem has gone RW and
+ * multithreaded, so it uses the journal_iters list:
+ */
+void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
+ struct bch_fs *c,
+ struct btree *b)
+{
+ struct btree_node_iter node_iter;
+
+ bch2_btree_node_iter_init_from_start(&node_iter, b);
+ __bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
+ list_add(&iter->journal.list, &c->journal_iters);
+}
+
+/* sort and dedup all keys in the journal: */
+
+void bch2_journal_entries_free(struct bch_fs *c)
+{
+ struct journal_replay **i;
+ struct genradix_iter iter;
+
+ genradix_for_each(&c->journal_entries, iter, i)
+ if (*i)
+ kvpfree(*i, offsetof(struct journal_replay, j) +
+ vstruct_bytes(&(*i)->j));
+ genradix_free(&c->journal_entries);
+}
+
+/*
+ * When keys compare equal, oldest compares first:
+ */
+static int journal_sort_key_cmp(const void *_l, const void *_r)
+{
+ const struct journal_key *l = _l;
+ const struct journal_key *r = _r;
+
+ return journal_key_cmp(l, r) ?:
+ cmp_int(l->journal_seq, r->journal_seq) ?:
+ cmp_int(l->journal_offset, r->journal_offset);
+}
+
+void bch2_journal_keys_free(struct journal_keys *keys)
+{
+ struct journal_key *i;
+
+ move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
+ keys->gap = keys->nr;
+
+ for (i = keys->d; i < keys->d + keys->nr; i++)
+ if (i->allocated)
+ kfree(i->k);
+
+ kvfree(keys->d);
+ keys->d = NULL;
+ keys->nr = keys->gap = keys->size = 0;
+}
+
+static void __journal_keys_sort(struct journal_keys *keys)
+{
+ struct journal_key *src, *dst;
+
+ sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);
+
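+	/*
+	 * The sort puts the oldest of equal keys first, so keeping the last
+	 * key of each run of duplicates keeps the newest version:
+	 */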
+ src = dst = keys->d;
+ while (src < keys->d + keys->nr) {
+ while (src + 1 < keys->d + keys->nr &&
+ src[0].btree_id == src[1].btree_id &&
+ src[0].level == src[1].level &&
+ bpos_eq(src[0].k->k.p, src[1].k->k.p))
+ src++;
+
+ *dst++ = *src++;
+ }
+
+ keys->nr = dst - keys->d;
+}
+
+int bch2_journal_keys_sort(struct bch_fs *c)
+{
+ struct genradix_iter iter;
+ struct journal_replay *i, **_i;
+ struct jset_entry *entry;
+ struct bkey_i *k;
+ struct journal_keys *keys = &c->journal_keys;
+ size_t nr_keys = 0, nr_read = 0;
+
+ genradix_for_each(&c->journal_entries, iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ for_each_jset_key(k, entry, &i->j)
+ nr_keys++;
+ }
+
+ if (!nr_keys)
+ return 0;
+
+ keys->size = roundup_pow_of_two(nr_keys);
+
+ keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
+ if (!keys->d) {
+ bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys); trying slowpath",
+ nr_keys);
+
+ do {
+ keys->size >>= 1;
+ keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
+ } while (!keys->d && keys->size > nr_keys / 8);
+
+ if (!keys->d) {
+ bch_err(c, "Failed to allocate %zu size buffer for sorted journal keys; exiting",
+ keys->size);
+ return -BCH_ERR_ENOMEM_journal_keys_sort;
+ }
+ }
+
+ genradix_for_each(&c->journal_entries, iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ cond_resched();
+
+ for_each_jset_key(k, entry, &i->j) {
+ if (keys->nr == keys->size) {
+ __journal_keys_sort(keys);
+
+ if (keys->nr > keys->size * 7 / 8) {
+ bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu/%zu",
+ keys->nr, keys->size, nr_read, nr_keys);
+ return -BCH_ERR_ENOMEM_journal_keys_sort;
+ }
+ }
+
+ keys->d[keys->nr++] = (struct journal_key) {
+ .btree_id = entry->btree_id,
+ .level = entry->level,
+ .k = k,
+ .journal_seq = le64_to_cpu(i->j.seq),
+ .journal_offset = k->_data - i->j._data,
+ };
+
+ nr_read++;
+ }
+ }
+
+ __journal_keys_sort(keys);
+ keys->gap = keys->nr;
+
+ bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_keys, keys->nr);
+ return 0;
+}
diff --git a/fs/bcachefs/btree_journal_iter.h b/fs/bcachefs/btree_journal_iter.h
new file mode 100644
index 000000000000..5d64e7e22f26
--- /dev/null
+++ b/fs/bcachefs/btree_journal_iter.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_JOURNAL_ITER_H
+#define _BCACHEFS_BTREE_JOURNAL_ITER_H
+
+struct journal_iter {
+ struct list_head list;
+ enum btree_id btree_id;
+ unsigned level;
+ size_t idx;
+ struct journal_keys *keys;
+};
+
+/*
+ * Iterate over keys in the btree, with keys from the journal overlaid on top:
+ */
+
+struct btree_and_journal_iter {
+ struct btree *b;
+ struct btree_node_iter node_iter;
+ struct bkey unpacked;
+
+ struct journal_iter journal;
+ struct bpos pos;
+ bool at_end;
+};
+
+struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *, enum btree_id,
+ unsigned, struct bpos, struct bpos, size_t *);
+struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *, enum btree_id,
+ unsigned, struct bpos);
+
+int bch2_journal_key_insert_take(struct bch_fs *, enum btree_id,
+ unsigned, struct bkey_i *);
+int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
+ unsigned, struct bkey_i *);
+int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
+ unsigned, struct bpos);
+void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id,
+ unsigned, struct bpos);
+
+void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *);
+struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *);
+
+void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *);
+void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *,
+ struct bch_fs *, struct btree *,
+ struct btree_node_iter, struct bpos);
+void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *,
+ struct bch_fs *,
+ struct btree *);
+
+void bch2_journal_keys_free(struct journal_keys *);
+void bch2_journal_entries_free(struct bch_fs *);
+
+int bch2_journal_keys_sort(struct bch_fs *);
+
+#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
new file mode 100644
index 000000000000..29a0b566a4fe
--- /dev/null
+++ b/fs/bcachefs/btree_key_cache.c
@@ -0,0 +1,1072 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_cache.h"
+#include "btree_iter.h"
+#include "btree_key_cache.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "errcode.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+#include "trace.h"
+
+#include <linux/sched/mm.h>
+
+static inline bool btree_uses_pcpu_readers(enum btree_id id)
+{
+ return id == BTREE_ID_subvolumes;
+}
+
+static struct kmem_cache *bch2_key_cache;
+
+static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct bkey_cached *ck = obj;
+ const struct bkey_cached_key *key = arg->key;
+
+ return ck->key.btree_id != key->btree_id ||
+ !bpos_eq(ck->key.pos, key->pos);
+}
+
+static const struct rhashtable_params bch2_btree_key_cache_params = {
+ .head_offset = offsetof(struct bkey_cached, hash),
+ .key_offset = offsetof(struct bkey_cached, key),
+ .key_len = sizeof(struct bkey_cached_key),
+ .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
+};
+
+__flatten
+inline struct bkey_cached *
+bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
+{
+ struct bkey_cached_key key = {
+ .btree_id = btree_id,
+ .pos = pos,
+ };
+
+ return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
+ bch2_btree_key_cache_params);
+}
+
+static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
+{
+ if (!six_trylock_intent(&ck->c.lock))
+ return false;
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ six_unlock_intent(&ck->c.lock);
+ return false;
+ }
+
+ if (!six_trylock_write(&ck->c.lock)) {
+ six_unlock_intent(&ck->c.lock);
+ return false;
+ }
+
+ return true;
+}
+
+static void bkey_cached_evict(struct btree_key_cache *c,
+ struct bkey_cached *ck)
+{
+ BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
+ bch2_btree_key_cache_params));
+ memset(&ck->key, ~0, sizeof(ck->key));
+
+ atomic_long_dec(&c->nr_keys);
+}
+
+static void bkey_cached_free(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
+{
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
+
+ BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
+
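+	/*
+	 * Lockless lookups may still see this entry: record the current SRCU
+	 * grace period sequence, checked before the memory is actually freed:
+	 */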
+ ck->btree_trans_barrier_seq =
+ start_poll_synchronize_srcu(&c->btree_trans_barrier);
+
+ if (ck->c.lock.readers)
+ list_move_tail(&ck->list, &bc->freed_pcpu);
+ else
+ list_move_tail(&ck->list, &bc->freed_nonpcpu);
+ atomic_long_inc(&bc->nr_freed);
+
+ kfree(ck->k);
+ ck->k = NULL;
+ ck->u64s = 0;
+
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+}
+
+#ifdef __KERNEL__
+static void __bkey_cached_move_to_freelist_ordered(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
+{
+ struct bkey_cached *pos;
+
+ list_for_each_entry_reverse(pos, &bc->freed_nonpcpu, list) {
+ if (ULONG_CMP_GE(ck->btree_trans_barrier_seq,
+ pos->btree_trans_barrier_seq)) {
+ list_move(&ck->list, &pos->list);
+ return;
+ }
+ }
+
+ list_move(&ck->list, &bc->freed_nonpcpu);
+}
+#endif
+
+static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
+{
+ BUG_ON(test_bit(BKEY_CACHED_DIRTY, &ck->flags));
+
+ if (!ck->c.lock.readers) {
+#ifdef __KERNEL__
+ struct btree_key_cache_freelist *f;
+ bool freed = false;
+
+ preempt_disable();
+ f = this_cpu_ptr(bc->pcpu_freed);
+
+ if (f->nr < ARRAY_SIZE(f->objs)) {
+ f->objs[f->nr++] = ck;
+ freed = true;
+ }
+ preempt_enable();
+
+ if (!freed) {
+ mutex_lock(&bc->lock);
+ preempt_disable();
+ f = this_cpu_ptr(bc->pcpu_freed);
+
+ while (f->nr > ARRAY_SIZE(f->objs) / 2) {
+ struct bkey_cached *ck2 = f->objs[--f->nr];
+
+ __bkey_cached_move_to_freelist_ordered(bc, ck2);
+ }
+ preempt_enable();
+
+ __bkey_cached_move_to_freelist_ordered(bc, ck);
+ mutex_unlock(&bc->lock);
+ }
+#else
+ mutex_lock(&bc->lock);
+ list_move_tail(&ck->list, &bc->freed_nonpcpu);
+ mutex_unlock(&bc->lock);
+#endif
+ } else {
+ mutex_lock(&bc->lock);
+ list_move_tail(&ck->list, &bc->freed_pcpu);
+ mutex_unlock(&bc->lock);
+ }
+}
+
+static void bkey_cached_free_fast(struct btree_key_cache *bc,
+ struct bkey_cached *ck)
+{
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
+
+ ck->btree_trans_barrier_seq =
+ start_poll_synchronize_srcu(&c->btree_trans_barrier);
+
+ list_del_init(&ck->list);
+ atomic_long_inc(&bc->nr_freed);
+
+ kfree(ck->k);
+ ck->k = NULL;
+ ck->u64s = 0;
+
+ bkey_cached_move_to_freelist(bc, ck);
+
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+}
+
+static struct bkey_cached *
+bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
+ bool *was_new)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_key_cache *bc = &c->btree_key_cache;
+ struct bkey_cached *ck = NULL;
+ bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
+ int ret;
+
+ if (!pcpu_readers) {
+#ifdef __KERNEL__
+ struct btree_key_cache_freelist *f;
+
+ preempt_disable();
+ f = this_cpu_ptr(bc->pcpu_freed);
+ if (f->nr)
+ ck = f->objs[--f->nr];
+ preempt_enable();
+
+ if (!ck) {
+ mutex_lock(&bc->lock);
+ preempt_disable();
+ f = this_cpu_ptr(bc->pcpu_freed);
+
+ while (!list_empty(&bc->freed_nonpcpu) &&
+ f->nr < ARRAY_SIZE(f->objs) / 2) {
+ ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
+ list_del_init(&ck->list);
+ f->objs[f->nr++] = ck;
+ }
+
+ ck = f->nr ? f->objs[--f->nr] : NULL;
+ preempt_enable();
+ mutex_unlock(&bc->lock);
+ }
+#else
+ mutex_lock(&bc->lock);
+ if (!list_empty(&bc->freed_nonpcpu)) {
+ ck = list_last_entry(&bc->freed_nonpcpu, struct bkey_cached, list);
+ list_del_init(&ck->list);
+ }
+ mutex_unlock(&bc->lock);
+#endif
+ } else {
+ mutex_lock(&bc->lock);
+ if (!list_empty(&bc->freed_pcpu)) {
+ ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
+ list_del_init(&ck->list);
+ }
+ mutex_unlock(&bc->lock);
+ }
+
+ if (ck) {
+ ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent, _THIS_IP_);
+ if (unlikely(ret)) {
+ bkey_cached_move_to_freelist(bc, ck);
+ return ERR_PTR(ret);
+ }
+
+ path->l[0].b = (void *) ck;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
+ mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+
+ ret = bch2_btree_node_lock_write(trans, path, &ck->c);
+ if (unlikely(ret)) {
+ btree_node_unlock(trans, path, 0);
+ bkey_cached_move_to_freelist(bc, ck);
+ return ERR_PTR(ret);
+ }
+
+ return ck;
+ }
+
+ ck = allocate_dropping_locks(trans, ret,
+ kmem_cache_zalloc(bch2_key_cache, _gfp));
+ if (ret) {
+ kmem_cache_free(bch2_key_cache, ck);
+ return ERR_PTR(ret);
+ }
+
+ if (!ck)
+ return NULL;
+
+ INIT_LIST_HEAD(&ck->list);
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
+
+ ck->c.cached = true;
+ BUG_ON(!six_trylock_intent(&ck->c.lock));
+ BUG_ON(!six_trylock_write(&ck->c.lock));
+ *was_new = true;
+ return ck;
+}
+
+static struct bkey_cached *
+bkey_cached_reuse(struct btree_key_cache *c)
+{
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct bkey_cached *ck;
+ unsigned i;
+
+ mutex_lock(&c->lock);
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(c->table.tbl, &c->table);
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+ bkey_cached_lock_for_evict(ck)) {
+ bkey_cached_evict(c, ck);
+ goto out;
+ }
+ }
+ ck = NULL;
+out:
+ rcu_read_unlock();
+ mutex_unlock(&c->lock);
+ return ck;
+}
+
+static struct bkey_cached *
+btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_key_cache *bc = &c->btree_key_cache;
+ struct bkey_cached *ck;
+ bool was_new = false;
+
+ ck = bkey_cached_alloc(trans, path, &was_new);
+ if (IS_ERR(ck))
+ return ck;
+
+ if (unlikely(!ck)) {
+ ck = bkey_cached_reuse(bc);
+ if (unlikely(!ck)) {
+ bch_err(c, "error allocating memory for key cache item, btree %s",
+ bch2_btree_ids[path->btree_id]);
+ return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
+ }
+
+ mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+ }
+
+ ck->c.level = 0;
+ ck->c.btree_id = path->btree_id;
+ ck->key.btree_id = path->btree_id;
+ ck->key.pos = path->pos;
+ ck->valid = false;
+ ck->flags = 1U << BKEY_CACHED_ACCESSED;
+
+ if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
+ &ck->hash,
+ bch2_btree_key_cache_params))) {
+ /* We raced with another fill: */
+
+ if (likely(was_new)) {
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+ kfree(ck);
+ } else {
+ bkey_cached_free_fast(bc, ck);
+ }
+
+ mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
+ return NULL;
+ }
+
+ atomic_long_inc(&bc->nr_keys);
+
+ six_unlock_write(&ck->c.lock);
+
+ return ck;
+}
+
+static int btree_key_cache_fill(struct btree_trans *trans,
+ struct btree_path *ck_path,
+ struct bkey_cached *ck)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ unsigned new_u64s = 0;
+ struct bkey_i *new_k = NULL;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, ck->key.btree_id, ck->key.pos,
+ BTREE_ITER_KEY_CACHE_FILL|
+ BTREE_ITER_CACHED_NOFILL);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!bch2_btree_node_relock(trans, ck_path, 0)) {
+ trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
+ goto err;
+ }
+
+	/*
+	 * bch2_varint_decode can read up to 7 bytes past the end of the
+	 * buffer (the extra data is never used), hence the + 1 below:
+	 */
+ new_u64s = k.k->u64s + 1;
+
+ /*
+ * Allocate some extra space so that the transaction commit path is less
+ * likely to have to reallocate, since that requires a transaction
+ * restart:
+ */
+ new_u64s = min(256U, (new_u64s * 3) / 2);
+
+ if (new_u64s > ck->u64s) {
+ new_u64s = roundup_pow_of_two(new_u64s);
+ new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
+ if (!new_k) {
+ bch2_trans_unlock(trans);
+
+ new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
+ if (!new_k) {
+ bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
+ bch2_btree_ids[ck->key.btree_id], new_u64s);
+ ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
+ goto err;
+ }
+
+ if (!bch2_btree_node_relock(trans, ck_path, 0)) {
+ kfree(new_k);
+ trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
+ goto err;
+ }
+
+ ret = bch2_trans_relock(trans);
+ if (ret) {
+ kfree(new_k);
+ goto err;
+ }
+ }
+ }
+
+ ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
+ if (ret) {
+ kfree(new_k);
+ goto err;
+ }
+
+ if (new_k) {
+ kfree(ck->k);
+ ck->u64s = new_u64s;
+ ck->k = new_k;
+ }
+
+ bkey_reassemble(ck->k, k);
+ ck->valid = true;
+ bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
+
+ /* We're not likely to need this iterator again: */
+ set_btree_iter_dontneed(&iter);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static noinline int
+bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck;
+ int ret = 0;
+
+ BUG_ON(path->level);
+
+ path->l[1].b = NULL;
+
+ if (bch2_btree_node_relock_notrace(trans, path, 0)) {
+ ck = (void *) path->l[0].b;
+ goto fill;
+ }
+retry:
+ ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
+ if (!ck) {
+ ck = btree_key_cache_create(trans, path);
+ ret = PTR_ERR_OR_ZERO(ck);
+ if (ret)
+ goto err;
+ if (!ck)
+ goto retry;
+
+ mark_btree_node_locked(trans, path, 0, BTREE_NODE_INTENT_LOCKED);
+ path->locks_want = 1;
+ } else {
+ enum six_lock_type lock_want = __btree_lock_want(path, 0);
+
+ ret = btree_node_lock(trans, path, (void *) ck, 0,
+ lock_want, _THIS_IP_);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto err;
+
+ BUG_ON(ret);
+
+ if (ck->key.btree_id != path->btree_id ||
+ !bpos_eq(ck->key.pos, path->pos)) {
+ six_unlock_type(&ck->c.lock, lock_want);
+ goto retry;
+ }
+
+ mark_btree_node_locked(trans, path, 0,
+ (enum btree_node_locked_type) lock_want);
+ }
+
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
+ path->l[0].b = (void *) ck;
+fill:
+ path->uptodate = BTREE_ITER_UPTODATE;
+
+ if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
+ /*
+ * Using the underscore version because we haven't set
+ * path->uptodate yet:
+ */
+ if (!path->locks_want &&
+ !__bch2_btree_path_upgrade(trans, path, 1)) {
+ trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
+ goto err;
+ }
+
+ ret = btree_key_cache_fill(trans, path, ck);
+ if (ret)
+ goto err;
+
+ ret = bch2_btree_path_relock(trans, path, _THIS_IP_);
+ if (ret)
+ goto err;
+
+ path->uptodate = BTREE_ITER_UPTODATE;
+ }
+
+ if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
+
+ BUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
+ BUG_ON(path->uptodate);
+
+ return ret;
+err:
+ path->uptodate = BTREE_ITER_NEED_TRAVERSE;
+ if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ btree_node_unlock(trans, path, 0);
+ path->l[0].b = ERR_PTR(ret);
+ }
+ return ret;
+}
+
+int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck;
+ int ret = 0;
+
+ EBUG_ON(path->level);
+
+ path->l[1].b = NULL;
+
+ if (bch2_btree_node_relock_notrace(trans, path, 0)) {
+ ck = (void *) path->l[0].b;
+ goto fill;
+ }
+retry:
+ ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
+ if (!ck) {
+ return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
+ } else {
+ enum six_lock_type lock_want = __btree_lock_want(path, 0);
+
+ ret = btree_node_lock(trans, path, (void *) ck, 0,
+ lock_want, _THIS_IP_);
+ EBUG_ON(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart));
+
+ if (ret)
+ return ret;
+
+ if (ck->key.btree_id != path->btree_id ||
+ !bpos_eq(ck->key.pos, path->pos)) {
+ six_unlock_type(&ck->c.lock, lock_want);
+ goto retry;
+ }
+
+ mark_btree_node_locked(trans, path, 0,
+ (enum btree_node_locked_type) lock_want);
+ }
+
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
+ path->l[0].b = (void *) ck;
+fill:
+ if (!ck->valid)
+ return bch2_btree_path_traverse_cached_slowpath(trans, path, flags);
+
+ if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ set_bit(BKEY_CACHED_ACCESSED, &ck->flags);
+
+ path->uptodate = BTREE_ITER_UPTODATE;
+ EBUG_ON(!ck->valid);
+ EBUG_ON(btree_node_locked_type(path, 0) != btree_lock_want(path, 0));
+
+ return ret;
+}
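+
+/*
+ * Note: the fast path above only handles a cached key that already exists
+ * and is valid; creation, filling and the upgrade-for-fill dance are all
+ * punted to the noinline slowpath so the common case stays compact.
+ */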
+
+static int btree_key_cache_flush_pos(struct btree_trans *trans,
+ struct bkey_cached_key key,
+ u64 journal_seq,
+ unsigned commit_flags,
+ bool evict)
+{
+ struct bch_fs *c = trans->c;
+ struct journal *j = &c->journal;
+ struct btree_iter c_iter, b_iter;
+ struct bkey_cached *ck = NULL;
+ int ret;
+
+ bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
+ BTREE_ITER_SLOTS|
+ BTREE_ITER_INTENT|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+
+ ret = bch2_btree_iter_traverse(&c_iter);
+ if (ret)
+ goto out;
+
+ ck = (void *) c_iter.path->l[0].b;
+ if (!ck)
+ goto out;
+
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ if (evict)
+ goto evict;
+ goto out;
+ }
+
+ BUG_ON(!ck->valid);
+
+ if (journal_seq && ck->journal.seq != journal_seq)
+ goto out;
+
+ /*
+ * Since journal reclaim depends on us making progress here, and the
+ * allocator/copygc depend on journal reclaim making progress, we need
+ * to be using alloc reserves:
+ */
+ ret = bch2_btree_iter_traverse(&b_iter) ?:
+ bch2_trans_update(trans, &b_iter, ck->k,
+ BTREE_UPDATE_KEY_CACHE_RECLAIM|
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ BTREE_TRIGGER_NORUN) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL|
+ (ck->journal.seq == journal_last_seq(j)
+ ? BCH_WATERMARK_reclaim
+ : 0)|
+ commit_flags);
+
+ bch2_fs_fatal_err_on(ret &&
+ !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+ !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
+ !bch2_journal_error(j), c,
+ "error flushing key cache: %s", bch2_err_str(ret));
+ if (ret)
+ goto out;
+
+ bch2_journal_pin_drop(j, &ck->journal);
+ bch2_journal_preres_put(j, &ck->res);
+
+ BUG_ON(!btree_node_locked(c_iter.path, 0));
+
+ if (!evict) {
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+ atomic_long_dec(&c->btree_key_cache.nr_dirty);
+ }
+ } else {
+ struct btree_path *path2;
+evict:
+ trans_for_each_path(trans, path2)
+ if (path2 != c_iter.path)
+ __bch2_btree_path_unlock(trans, path2);
+
+ bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+ atomic_long_dec(&c->btree_key_cache.nr_dirty);
+ }
+
+ mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
+ bkey_cached_evict(&c->btree_key_cache, ck);
+ bkey_cached_free_fast(&c->btree_key_cache, ck);
+ }
+out:
+ bch2_trans_iter_exit(trans, &b_iter);
+ bch2_trans_iter_exit(trans, &c_iter);
+ return ret;
+}
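+
+/*
+ * Note: a flush is an ordinary btree update that copies the cached key back
+ * into the underlying btree - with BTREE_TRIGGER_NORUN, presumably because
+ * triggers already ran when the cached update was made - after which the
+ * journal pin can be dropped. With @evict the entry is also removed from
+ * the hash table and freed.
+ */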
+
+int bch2_btree_key_cache_journal_flush(struct journal *j,
+ struct journal_entry_pin *pin, u64 seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bkey_cached *ck =
+ container_of(pin, struct bkey_cached, journal);
+ struct bkey_cached_key key;
+ struct btree_trans *trans = bch2_trans_get(c);
+ int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ int ret = 0;
+
+ btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
+ key = ck->key;
+
+ if (ck->journal.seq != seq ||
+ !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ six_unlock_read(&ck->c.lock);
+ goto unlock;
+ }
+
+ if (ck->seq != seq) {
+ bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
+ bch2_btree_key_cache_journal_flush);
+ six_unlock_read(&ck->c.lock);
+ goto unlock;
+ }
+ six_unlock_read(&ck->c.lock);
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ btree_key_cache_flush_pos(trans, key, seq,
+ BTREE_INSERT_JOURNAL_RECLAIM, false));
+unlock:
+ srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
+
+ bch2_trans_put(trans);
+ return ret;
+}
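+
+/*
+ * Note on the ->seq check above: if ck->seq doesn't match the pin's @seq,
+ * the key was redirtied at a later sequence number after this pin was added;
+ * rather than flushing early we move the pin forward to ck->seq and flush
+ * when that sequence number itself needs to be reclaimed.
+ */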
+
+/*
+ * Flush and evict a key from the key cache:
+ */
+int bch2_btree_key_cache_flush(struct btree_trans *trans,
+ enum btree_id id, struct bpos pos)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached_key key = { id, pos };
+
+ /* Fastpath - assume it won't be found: */
+ if (!bch2_btree_key_cache_find(c, id, pos))
+ return 0;
+
+ return btree_key_cache_flush_pos(trans, key, 0, 0, true);
+}
+
+bool bch2_btree_insert_key_cached(struct btree_trans *trans,
+ unsigned flags,
+ struct btree_insert_entry *insert_entry)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck = (void *) insert_entry->path->l[0].b;
+ struct bkey_i *insert = insert_entry->k;
+ bool kick_reclaim = false;
+
+ BUG_ON(insert->k.u64s > ck->u64s);
+
+ if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ int difference;
+
+ BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s);
+
+ difference = jset_u64s(insert->k.u64s) - ck->res.u64s;
+ if (difference > 0) {
+ trans->journal_preres.u64s -= difference;
+ ck->res.u64s += difference;
+ }
+ }
+
+ bkey_copy(ck->k, insert);
+ ck->valid = true;
+
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+ set_bit(BKEY_CACHED_DIRTY, &ck->flags);
+ atomic_long_inc(&c->btree_key_cache.nr_dirty);
+
+ if (bch2_nr_btree_keys_need_flush(c))
+ kick_reclaim = true;
+ }
+
+ /*
+ * To minimize lock contention, we only add the journal pin here and
+ * defer pin updates to the flush callback via ->seq. Be careful not to
+ * update ->seq on nojournal commits because we don't want to update the
+ * pin to a seq that doesn't include journal updates on disk. Otherwise
+ * we risk losing the update after a crash.
+ *
+ * The only exception is if the pin is not active in the first place. We
+ * have to add the pin because journal reclaim drives key cache
+ * flushing. The flush callback will not proceed unless ->seq matches
+ * the latest pin, so make sure it starts with a consistent value.
+ */
+ if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
+ !journal_pin_active(&ck->journal)) {
+ ck->seq = trans->journal_res.seq;
+ }
+ bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
+ &ck->journal, bch2_btree_key_cache_journal_flush);
+
+ if (kick_reclaim)
+ journal_reclaim_kick(&c->journal);
+ return true;
+}
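+
+/*
+ * Note on the preres accounting above: journal reservation moves from the
+ * transaction to the cached key only as the key grows. E.g. if
+ * jset_u64s(insert->k.u64s) is 8 and ck->res.u64s is already 5, only the
+ * difference of 3 u64s is transferred from trans->journal_preres to ck->res.
+ */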
+
+void bch2_btree_key_cache_drop(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck = (void *) path->l[0].b;
+
+ BUG_ON(!ck->valid);
+
+ /*
+ * We just did an update to the btree, bypassing the key cache: the key
+ * cache key is now stale and must be dropped, even if dirty:
+ */
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+ atomic_long_dec(&c->btree_key_cache.nr_dirty);
+ bch2_journal_pin_drop(&c->journal, &ck->journal);
+ }
+
+ ck->valid = false;
+}
+
+static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct bch_fs *c = container_of(shrink, struct bch_fs,
+ btree_key_cache.shrink);
+ struct btree_key_cache *bc = &c->btree_key_cache;
+ struct bucket_table *tbl;
+ struct bkey_cached *ck, *t;
+ size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
+ unsigned start, flags;
+ int srcu_idx;
+
+ mutex_lock(&bc->lock);
+ srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+ flags = memalloc_nofs_save();
+
+ /*
+ * Newest freed entries are at the end of the list - once we hit one
+ * that's too new to be freed, we can bail out:
+ */
+ list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
+ if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
+ ck->btree_trans_barrier_seq))
+ break;
+
+ list_del(&ck->list);
+ six_lock_exit(&ck->c.lock);
+ kmem_cache_free(bch2_key_cache, ck);
+ atomic_long_dec(&bc->nr_freed);
+ scanned++;
+ freed++;
+ }
+
+ if (scanned >= nr)
+ goto out;
+
+ list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
+ if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
+ ck->btree_trans_barrier_seq))
+ break;
+
+ list_del(&ck->list);
+ six_lock_exit(&ck->c.lock);
+ kmem_cache_free(bch2_key_cache, ck);
+ atomic_long_dec(&bc->nr_freed);
+ scanned++;
+ freed++;
+ }
+
+ if (scanned >= nr)
+ goto out;
+
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
+ if (bc->shrink_iter >= tbl->size)
+ bc->shrink_iter = 0;
+ start = bc->shrink_iter;
+
+ do {
+ struct rhash_head *pos, *next;
+
+ pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
+
+ while (!rht_is_a_nulls(pos)) {
+ next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
+ ck = container_of(pos, struct bkey_cached, hash);
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
+ goto next;
+
+ if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
+ else if (bkey_cached_lock_for_evict(ck)) {
+ bkey_cached_evict(bc, ck);
+ bkey_cached_free(bc, ck);
+ }
+
+ scanned++;
+ if (scanned >= nr)
+ break;
+next:
+ pos = next;
+ }
+
+ bc->shrink_iter++;
+ if (bc->shrink_iter >= tbl->size)
+ bc->shrink_iter = 0;
+ } while (scanned < nr && bc->shrink_iter != start);
+
+ rcu_read_unlock();
+out:
+ memalloc_nofs_restore(flags);
+ srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
+ mutex_unlock(&bc->lock);
+
+ return freed;
+}
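+
+/*
+ * Note: the hash table walk above is a second-chance (clock) algorithm -
+ * bc->shrink_iter is the clock hand moving over the buckets, an entry's
+ * ACCESSED bit buys it one extra pass, and only clean entries whose bit has
+ * already been cleared (and that aren't locked) get evicted.
+ */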
+
+static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct bch_fs *c = container_of(shrink, struct bch_fs,
+ btree_key_cache.shrink);
+ struct btree_key_cache *bc = &c->btree_key_cache;
+ long nr = atomic_long_read(&bc->nr_keys) -
+ atomic_long_read(&bc->nr_dirty);
+
+ return max(0L, nr);
+}
+
+void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
+{
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
+ struct bucket_table *tbl;
+ struct bkey_cached *ck, *n;
+ struct rhash_head *pos;
+ LIST_HEAD(items);
+ unsigned i;
+#ifdef __KERNEL__
+ int cpu;
+#endif
+
+ unregister_shrinker(&bc->shrink);
+
+ mutex_lock(&bc->lock);
+
+ /*
+ * The loop is needed to guard against racing with rehash:
+ */
+ while (atomic_long_read(&bc->nr_keys)) {
+ rcu_read_lock();
+ tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
+ if (tbl)
+ for (i = 0; i < tbl->size; i++)
+ rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+ bkey_cached_evict(bc, ck);
+ list_add(&ck->list, &items);
+ }
+ rcu_read_unlock();
+ }
+
+#ifdef __KERNEL__
+ for_each_possible_cpu(cpu) {
+ struct btree_key_cache_freelist *f =
+ per_cpu_ptr(bc->pcpu_freed, cpu);
+
+ for (i = 0; i < f->nr; i++) {
+ ck = f->objs[i];
+ list_add(&ck->list, &items);
+ }
+ }
+#endif
+
+ list_splice(&bc->freed_pcpu, &items);
+ list_splice(&bc->freed_nonpcpu, &items);
+
+ mutex_unlock(&bc->lock);
+
+ list_for_each_entry_safe(ck, n, &items, list) {
+ cond_resched();
+
+ bch2_journal_pin_drop(&c->journal, &ck->journal);
+ bch2_journal_preres_put(&c->journal, &ck->res);
+
+ list_del(&ck->list);
+ kfree(ck->k);
+ six_lock_exit(&ck->c.lock);
+ kmem_cache_free(bch2_key_cache, ck);
+ }
+
+ if (atomic_long_read(&bc->nr_dirty) &&
+ !bch2_journal_error(&c->journal) &&
+ test_bit(BCH_FS_WAS_RW, &c->flags))
+ panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
+ atomic_long_read(&bc->nr_dirty));
+
+ if (atomic_long_read(&bc->nr_keys))
+ panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
+ atomic_long_read(&bc->nr_keys));
+
+ if (bc->table_init_done)
+ rhashtable_destroy(&bc->table);
+
+ free_percpu(bc->pcpu_freed);
+}
+
+void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
+{
+ mutex_init(&c->lock);
+ INIT_LIST_HEAD(&c->freed_pcpu);
+ INIT_LIST_HEAD(&c->freed_nonpcpu);
+}
+
+int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
+{
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
+
+#ifdef __KERNEL__
+ bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
+ if (!bc->pcpu_freed)
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+#endif
+
+ if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+
+ bc->table_init_done = true;
+
+ bc->shrink.seeks = 0;
+ bc->shrink.count_objects = bch2_btree_key_cache_count;
+ bc->shrink.scan_objects = bch2_btree_key_cache_scan;
+ if (register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name))
+ return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+ return 0;
+}
+
+void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
+{
+ prt_printf(out, "nr_freed:\t%lu", atomic_long_read(&c->nr_freed));
+ prt_newline(out);
+ prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
+ prt_newline(out);
+ prt_printf(out, "nr_dirty:\t%lu", atomic_long_read(&c->nr_dirty));
+ prt_newline(out);
+}
+
+void bch2_btree_key_cache_exit(void)
+{
+ kmem_cache_destroy(bch2_key_cache);
+}
+
+int __init bch2_btree_key_cache_init(void)
+{
+ bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
+ if (!bch2_key_cache)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
new file mode 100644
index 000000000000..be3acde2caa0
--- /dev/null
+++ b/fs/bcachefs/btree_key_cache.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
+#define _BCACHEFS_BTREE_KEY_CACHE_H
+
+static inline size_t bch2_nr_btree_keys_need_flush(struct bch_fs *c)
+{
+ size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
+ size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
+ size_t max_dirty = 1024 + nr_keys / 2;
+
+ return max_t(ssize_t, 0, nr_dirty - max_dirty);
+}
+
+static inline bool bch2_btree_key_cache_must_wait(struct bch_fs *c)
+{
+ size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
+ size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
+ size_t max_dirty = 4096 + (nr_keys * 3) / 4;
+
+ return nr_dirty > max_dirty;
+}
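+
+/*
+ * Worked example: with nr_keys = 10000, bch2_nr_btree_keys_need_flush()
+ * starts asking for flushing once nr_dirty exceeds 1024 + 10000/2 = 6024,
+ * and bch2_btree_key_cache_must_wait() throttles updates once nr_dirty
+ * exceeds 4096 + (10000 * 3) / 4 = 11596.
+ */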
+
+int bch2_btree_key_cache_journal_flush(struct journal *,
+ struct journal_entry_pin *, u64);
+
+struct bkey_cached *
+bch2_btree_key_cache_find(struct bch_fs *, enum btree_id, struct bpos);
+
+int bch2_btree_path_traverse_cached(struct btree_trans *, struct btree_path *,
+ unsigned);
+
+bool bch2_btree_insert_key_cached(struct btree_trans *, unsigned,
+ struct btree_insert_entry *);
+int bch2_btree_key_cache_flush(struct btree_trans *,
+ enum btree_id, struct bpos);
+void bch2_btree_key_cache_drop(struct btree_trans *,
+ struct btree_path *);
+
+void bch2_fs_btree_key_cache_exit(struct btree_key_cache *);
+void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *);
+int bch2_fs_btree_key_cache_init(struct btree_key_cache *);
+
+void bch2_btree_key_cache_to_text(struct printbuf *, struct btree_key_cache *);
+
+void bch2_btree_key_cache_exit(void);
+int __init bch2_btree_key_cache_init(void);
+
+#endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
new file mode 100644
index 000000000000..40c8ed8f7bf1
--- /dev/null
+++ b/fs/bcachefs/btree_locking.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_locking.h"
+#include "btree_types.h"
+
+static struct lock_class_key bch2_btree_node_lock_key;
+
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+ enum six_lock_init_flags flags)
+{
+ __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+ lockdep_set_novalidate_class(&b->lock);
+}
+
+#ifdef CONFIG_LOCKDEP
+void bch2_assert_btree_nodes_not_locked(void)
+{
+#if 0
+ //Re-enable when lock_class_is_held() is merged:
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+#endif
+}
+#endif
+
+/* Btree node locking: */
+
+struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
+ struct btree_path *skip,
+ struct btree_bkey_cached_common *b,
+ unsigned level)
+{
+ struct btree_path *path;
+ struct six_lock_count ret;
+
+ memset(&ret, 0, sizeof(ret));
+
+ if (IS_ERR_OR_NULL(b))
+ return ret;
+
+ trans_for_each_path(trans, path)
+ if (path != skip && &path->l[level].b->c == b) {
+ int t = btree_node_locked_type(path, level);
+
+ if (t != BTREE_NODE_UNLOCKED)
+ ret.n[t]++;
+ }
+
+ return ret;
+}
+
+/* unlock */
+
+void bch2_btree_node_unlock_write(struct btree_trans *trans,
+ struct btree_path *path, struct btree *b)
+{
+ bch2_btree_node_unlock_write_inlined(trans, path, b);
+}
+
+/* lock */
+
+/*
+ * @trans wants to lock @b with type @type
+ */
+struct trans_waiting_for_lock {
+ struct btree_trans *trans;
+ struct btree_bkey_cached_common *node_want;
+ enum six_lock_type lock_want;
+
+	/* for iterating over held locks: */
+ u8 path_idx;
+ u8 level;
+ u64 lock_start_time;
+};
+
+struct lock_graph {
+ struct trans_waiting_for_lock g[8];
+ unsigned nr;
+};
+
+static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
+{
+ struct trans_waiting_for_lock *i;
+
+ prt_printf(out, "Found lock cycle (%u entries):", g->nr);
+ prt_newline(out);
+
+ for (i = g->g; i < g->g + g->nr; i++)
+ bch2_btree_trans_to_text(out, i->trans);
+}
+
+static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
+{
+ struct trans_waiting_for_lock *i;
+
+ for (i = g->g; i != g->g + g->nr; i++) {
+ if (i != g->g)
+ prt_str(out, "<- ");
+ prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
+ }
+ prt_newline(out);
+}
+
+static void lock_graph_up(struct lock_graph *g)
+{
+ closure_put(&g->g[--g->nr].trans->ref);
+}
+
+static noinline void lock_graph_pop_all(struct lock_graph *g)
+{
+ while (g->nr)
+ lock_graph_up(g);
+}
+
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+ g->g[g->nr++] = (struct trans_waiting_for_lock) {
+ .trans = trans,
+ .node_want = trans->locking,
+ .lock_want = trans->locking_wait.lock_want,
+ };
+}
+
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+ closure_get(&trans->ref);
+ __lock_graph_down(g, trans);
+}
+
+static bool lock_graph_remove_non_waiters(struct lock_graph *g)
+{
+ struct trans_waiting_for_lock *i;
+
+ for (i = g->g + 1; i < g->g + g->nr; i++)
+ if (i->trans->locking != i->node_want ||
+ i->trans->locking_wait.start_time != i[-1].lock_start_time) {
+ while (g->g + g->nr > i)
+ lock_graph_up(g);
+ return true;
+ }
+
+ return false;
+}
+
+static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
+{
+ if (i == g->g) {
+ trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
+ return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
+ } else {
+ i->trans->lock_must_abort = true;
+ wake_up_process(i->trans->locking_wait.task);
+ return 0;
+ }
+}
+
+static int btree_trans_abort_preference(struct btree_trans *trans)
+{
+ if (trans->lock_may_not_fail)
+ return 0;
+ if (trans->locking_wait.lock_want == SIX_LOCK_write)
+ return 1;
+ if (!trans->in_traverse_all)
+ return 2;
+ return 3;
+}
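+
+/*
+ * Note: higher return values make more attractive abort victims - 0 means
+ * "may not fail, never abort", a waiter for a write lock ranks lowest among
+ * abortable transactions, and a transaction already inside traverse_all
+ * (i.e. already restarting) ranks highest.
+ */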
+
+static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
+{
+ struct trans_waiting_for_lock *i, *abort = NULL;
+ unsigned best = 0, pref;
+ int ret;
+
+ if (lock_graph_remove_non_waiters(g))
+ return 0;
+
+ /* Only checking, for debugfs: */
+ if (cycle) {
+ print_cycle(cycle, g);
+ ret = -1;
+ goto out;
+ }
+
+ for (i = g->g; i < g->g + g->nr; i++) {
+ pref = btree_trans_abort_preference(i->trans);
+ if (pref > best) {
+ abort = i;
+ best = pref;
+ }
+ }
+
+ if (unlikely(!best)) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
+
+ for (i = g->g; i < g->g + g->nr; i++) {
+ struct btree_trans *trans = i->trans;
+
+ bch2_btree_trans_to_text(&buf, trans);
+
+ prt_printf(&buf, "backtrace:");
+ prt_newline(&buf);
+ printbuf_indent_add(&buf, 2);
+ bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
+ printbuf_indent_sub(&buf, 2);
+ prt_newline(&buf);
+ }
+
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+ BUG();
+ }
+
+ ret = abort_lock(g, abort);
+out:
+ if (ret)
+ while (g->nr)
+ lock_graph_up(g);
+ return ret;
+}
+
+static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
+ struct printbuf *cycle)
+{
+ struct btree_trans *orig_trans = g->g->trans;
+ struct trans_waiting_for_lock *i;
+
+ for (i = g->g; i < g->g + g->nr; i++)
+ if (i->trans == trans) {
+ closure_put(&trans->ref);
+ return break_cycle(g, cycle);
+ }
+
+ if (g->nr == ARRAY_SIZE(g->g)) {
+ closure_put(&trans->ref);
+
+ if (orig_trans->lock_may_not_fail)
+ return 0;
+
+ while (g->nr)
+ lock_graph_up(g);
+
+ if (cycle)
+ return 0;
+
+ trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
+ return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
+ }
+
+ __lock_graph_down(g, trans);
+ return 0;
+}
+
+static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
+{
+ return t1 + t2 > 1;
+}
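+
+/*
+ * Note: with SIX_LOCK_read = 0, SIX_LOCK_intent = 1, SIX_LOCK_write = 2, the
+ * sum test above encodes the six-lock conflict matrix:
+ *
+ *   read   + read   = 0  -> no conflict
+ *   read   + intent = 1  -> no conflict
+ *   intent + intent = 2  -> conflict
+ *   any    + write  >= 2 -> conflict
+ */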
+
+int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
+{
+ struct lock_graph g;
+ struct trans_waiting_for_lock *top;
+ struct btree_bkey_cached_common *b;
+ struct btree_path *path;
+ unsigned path_idx;
+ int ret;
+
+ if (trans->lock_must_abort) {
+ if (cycle)
+ return -1;
+
+ trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
+ }
+
+ g.nr = 0;
+ lock_graph_down(&g, trans);
+next:
+ if (!g.nr)
+ return 0;
+
+ top = &g.g[g.nr - 1];
+
+ trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
+ if (!path->nodes_locked)
+ continue;
+
+ if (path_idx != top->path_idx) {
+ top->path_idx = path_idx;
+ top->level = 0;
+ top->lock_start_time = 0;
+ }
+
+ for (;
+ top->level < BTREE_MAX_DEPTH;
+ top->level++, top->lock_start_time = 0) {
+ int lock_held = btree_node_locked_type(path, top->level);
+
+ if (lock_held == BTREE_NODE_UNLOCKED)
+ continue;
+
+ b = &READ_ONCE(path->l[top->level].b)->c;
+
+ if (IS_ERR_OR_NULL(b)) {
+ /*
+ * If we get here, it means we raced with the
+ * other thread updating its btree_path
+ * structures - which means it can't be blocked
+ * waiting on a lock:
+ */
+ if (!lock_graph_remove_non_waiters(&g)) {
+ /*
+ * If lock_graph_remove_non_waiters()
+ * didn't do anything, it must be
+ * because we're being called by debugfs
+ * checking for lock cycles, which
+ * invokes us on btree_transactions that
+ * aren't actually waiting on anything.
+ * Just bail out:
+ */
+ lock_graph_pop_all(&g);
+ }
+
+ goto next;
+ }
+
+ if (list_empty_careful(&b->lock.wait_list))
+ continue;
+
+ raw_spin_lock(&b->lock.wait_lock);
+ list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
+ BUG_ON(b != trans->locking);
+
+ if (top->lock_start_time &&
+ time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
+ continue;
+
+ top->lock_start_time = trans->locking_wait.start_time;
+
+ /* Don't check for self deadlock: */
+ if (trans == top->trans ||
+ !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
+ continue;
+
+ closure_get(&trans->ref);
+ raw_spin_unlock(&b->lock.wait_lock);
+
+ ret = lock_graph_descend(&g, trans, cycle);
+ if (ret)
+ return ret;
+				goto next;
+			}
+ raw_spin_unlock(&b->lock.wait_lock);
+ }
+ }
+
+ if (g.nr > 1 && cycle)
+ print_chain(cycle, &g);
+ lock_graph_up(&g);
+ goto next;
+}
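+
+/*
+ * Note: the above is an iterative depth-first search over the waits-for
+ * graph - g is an explicit stack (at most 8 deep) of transactions, each edge
+ * runs from a lock a transaction holds to a transaction waiting on that
+ * lock, and reaching a transaction already on the stack means a cycle, which
+ * break_cycle() resolves by aborting the most expendable participant.
+ */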
+
+int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
+{
+ struct btree_trans *trans = p;
+
+ return bch2_check_for_deadlock(trans, NULL);
+}
+
+int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
+ struct btree_bkey_cached_common *b,
+ bool lock_may_not_fail)
+{
+ int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
+ int ret;
+
+ /*
+ * Must drop our read locks before calling six_lock_write() -
+ * six_unlock() won't do wakeups until the reader count
+ * goes to 0, and it's safe because we have the node intent
+ * locked:
+ */
+ six_lock_readers_add(&b->lock, -readers);
+ ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
+ lock_may_not_fail, _RET_IP_);
+ six_lock_readers_add(&b->lock, readers);
+
+ if (ret)
+ mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
+
+ return ret;
+}
+
+void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_bkey_cached_common *b)
+{
+ struct btree_path *linked;
+ unsigned i;
+ int ret;
+
+ /*
+ * XXX BIG FAT NOTICE
+ *
+ * Drop all read locks before taking a write lock:
+ *
+ * This is a hack, because bch2_btree_node_lock_write_nofail() is a
+ * hack - but by dropping read locks first, this should never fail, and
+ * we only use this in code paths where whatever read locks we've
+ * already taken are no longer needed:
+ */
+
+ trans_for_each_path(trans, linked) {
+ if (!linked->nodes_locked)
+ continue;
+
+ for (i = 0; i < BTREE_MAX_DEPTH; i++)
+ if (btree_node_read_locked(linked, i)) {
+ btree_node_unlock(trans, linked, i);
+ btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
+ }
+ }
+
+ ret = __btree_node_lock_write(trans, path, b, true);
+ BUG_ON(ret);
+}
+
+/* relock */
+
+static inline bool btree_path_get_locks(struct btree_trans *trans,
+ struct btree_path *path,
+ bool upgrade)
+{
+ unsigned l = path->level;
+ int fail_idx = -1;
+
+ do {
+ if (!btree_path_node(path, l))
+ break;
+
+ if (!(upgrade
+ ? bch2_btree_node_upgrade(trans, path, l)
+ : bch2_btree_node_relock(trans, path, l)))
+ fail_idx = l;
+
+ l++;
+ } while (l < path->locks_want);
+
+ /*
+ * When we fail to get a lock, we have to ensure that any child nodes
+ * can't be relocked so bch2_btree_path_traverse has to walk back up to
+ * the node that we failed to relock:
+ */
+ if (fail_idx >= 0) {
+ __bch2_btree_path_unlock(trans, path);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+
+ do {
+ path->l[fail_idx].b = upgrade
+ ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
+ : ERR_PTR(-BCH_ERR_no_btree_node_relock);
+ --fail_idx;
+ } while (fail_idx >= 0);
+ }
+
+ if (path->uptodate == BTREE_ITER_NEED_RELOCK)
+ path->uptodate = BTREE_ITER_UPTODATE;
+
+ bch2_trans_verify_locks(trans);
+
+ return path->uptodate < BTREE_ITER_NEED_RELOCK;
+}
+
+bool __bch2_btree_node_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level,
+ bool trace)
+{
+ struct btree *b = btree_path_node(path, level);
+ int want = __btree_lock_want(path, level);
+
+ if (race_fault())
+ goto fail;
+
+ if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
+ (btree_node_lock_seq_matches(path, b, level) &&
+ btree_node_lock_increment(trans, &b->c, level, want))) {
+ mark_btree_node_locked(trans, path, level, want);
+ return true;
+ }
+fail:
+ if (trace && !trans->notrace_relock_fail)
+ trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
+ return false;
+}
+
+/* upgrade */
+
+bool bch2_btree_node_upgrade(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ struct btree *b = path->l[level].b;
+ struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);
+
+ if (!is_btree_node(path, level))
+ return false;
+
+ switch (btree_lock_want(path, level)) {
+ case BTREE_NODE_UNLOCKED:
+ BUG_ON(btree_node_locked(path, level));
+ return true;
+ case BTREE_NODE_READ_LOCKED:
+ BUG_ON(btree_node_intent_locked(path, level));
+ return bch2_btree_node_relock(trans, path, level);
+ case BTREE_NODE_INTENT_LOCKED:
+ break;
+ case BTREE_NODE_WRITE_LOCKED:
+ BUG();
+ }
+
+ if (btree_node_intent_locked(path, level))
+ return true;
+
+ if (race_fault())
+ return false;
+
+ if (btree_node_locked(path, level)) {
+ bool ret;
+
+ six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
+ ret = six_lock_tryupgrade(&b->c.lock);
+ six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);
+
+ if (ret)
+ goto success;
+ } else {
+ if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
+ goto success;
+ }
+
+ /*
+ * Do we already have an intent lock via another path? If so, just bump
+ * lock count:
+ */
+ if (btree_node_lock_seq_matches(path, b, level) &&
+ btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_unlock(trans, path, level);
+ goto success;
+ }
+
+ trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
+ return false;
+success:
+ mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
+ return true;
+}
+
+/* Btree path locking: */
+
+/*
+ * Only for btree_cache.c - only relocks intent locks
+ */
+int bch2_btree_path_relock_intent(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ unsigned l;
+
+ for (l = path->level;
+ l < path->locks_want && btree_path_node(path, l);
+ l++) {
+ if (!bch2_btree_node_relock(trans, path, l)) {
+ __bch2_btree_path_unlock(trans, path);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+ trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
+ }
+ }
+
+ return 0;
+}
+
+__flatten
+bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
+ struct btree_path *path, unsigned long trace_ip)
+{
+ return btree_path_get_locks(trans, path, false);
+}
+
+int __bch2_btree_path_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned long trace_ip)
+{
+ if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
+ trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
+ }
+
+ return 0;
+}
+
+bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_locks_want)
+{
+ EBUG_ON(path->locks_want >= new_locks_want);
+
+ path->locks_want = new_locks_want;
+
+ return btree_path_get_locks(trans, path, true);
+}
+
+bool __bch2_btree_path_upgrade(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_locks_want)
+{
+ struct btree_path *linked;
+
+ if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
+ return true;
+
+ /*
+ * XXX: this is ugly - we'd prefer to not be mucking with other
+ * iterators in the btree_trans here.
+ *
+ * On failure to upgrade the iterator, setting iter->locks_want and
+ * calling get_locks() is sufficient to make bch2_btree_path_traverse()
+ * get the locks we want on transaction restart.
+ *
+ * But if this iterator was a clone, on transaction restart what we did
+ * to this iterator isn't going to be preserved.
+ *
+ * Possibly we could add an iterator field for the parent iterator when
+ * an iterator is a copy - for now, we'll just upgrade any other
+ * iterators with the same btree id.
+ *
+ * The code below used to be needed to ensure ancestor nodes get locked
+ * before interior nodes - now that's handled by
+ * bch2_btree_path_traverse_all().
+ */
+ if (!path->cached && !trans->in_traverse_all)
+ trans_for_each_path(trans, linked)
+ if (linked != path &&
+ linked->cached == path->cached &&
+ linked->btree_id == path->btree_id &&
+ linked->locks_want < new_locks_want) {
+ linked->locks_want = new_locks_want;
+ btree_path_get_locks(trans, linked, true);
+ }
+
+ return false;
+}
+
+void __bch2_btree_path_downgrade(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_locks_want)
+{
+ unsigned l;
+
+ EBUG_ON(path->locks_want < new_locks_want);
+
+ path->locks_want = new_locks_want;
+
+ while (path->nodes_locked &&
+ (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
+ if (l > path->level) {
+ btree_node_unlock(trans, path, l);
+ } else {
+ if (btree_node_intent_locked(path, l)) {
+ six_lock_downgrade(&path->l[l].b->c.lock);
+ mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
+ }
+ break;
+ }
+ }
+
+ bch2_btree_path_verify_locks(path);
+}
+
+/* Btree transaction locking: */
+
+void bch2_trans_downgrade(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ bch2_btree_path_downgrade(trans, path);
+}
+
+int bch2_trans_relock(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ if (unlikely(trans->restarted))
+ return -((int) trans->restarted);
+
+ trans_for_each_path(trans, path)
+ if (path->should_be_locked &&
+ !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
+ trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ }
+ return 0;
+}
+
+int bch2_trans_relock_notrace(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ if (unlikely(trans->restarted))
+ return -((int) trans->restarted);
+
+ trans_for_each_path(trans, path)
+ if (path->should_be_locked &&
+ !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ }
+ return 0;
+}
+
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ __bch2_btree_path_unlock(trans, path);
+}
+
+void bch2_trans_unlock(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ __bch2_btree_path_unlock(trans, path);
+}
+
+bool bch2_trans_locked(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (path->nodes_locked)
+ return true;
+ return false;
+}
+
+int __bch2_trans_mutex_lock(struct btree_trans *trans,
+ struct mutex *lock)
+{
+ int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
+
+ if (ret)
+ mutex_unlock(lock);
+ return ret;
+}
+
+/* Debug */
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void bch2_btree_path_verify_locks(struct btree_path *path)
+{
+ unsigned l;
+
+ if (!path->nodes_locked) {
+ BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+ btree_path_node(path, path->level));
+ return;
+ }
+
+ for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+ int want = btree_lock_want(path, l);
+ int have = btree_node_locked_type(path, l);
+
+ BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);
+
+ BUG_ON(is_btree_node(path, l) &&
+ (want == BTREE_NODE_UNLOCKED ||
+ have != BTREE_NODE_WRITE_LOCKED) &&
+ want != have);
+ }
+}
+
+void bch2_trans_verify_locks(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ bch2_btree_path_verify_locks(path);
+}
+
+#endif
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
new file mode 100644
index 000000000000..6231e9ffc5d7
--- /dev/null
+++ b/fs/bcachefs/btree_locking.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_LOCKING_H
+#define _BCACHEFS_BTREE_LOCKING_H
+
+/*
+ * Only for internal btree use:
+ *
+ * The btree iterator tracks what locks it wants to take, and what locks it
+ * currently has - here we have wrappers for locking/unlocking btree nodes and
+ * updating the iterator state
+ */
+
+#include "btree_iter.h"
+#include "six.h"
+
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
+
+#ifdef CONFIG_LOCKDEP
+void bch2_assert_btree_nodes_not_locked(void);
+#else
+static inline void bch2_assert_btree_nodes_not_locked(void) {}
+#endif
+
+void bch2_trans_unlock_noassert(struct btree_trans *);
+
+static inline bool is_btree_node(struct btree_path *path, unsigned l)
+{
+ return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
+}
+
+static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
+{
+ return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
+ ? &trans->c->btree_transaction_stats[trans->fn_idx]
+ : NULL;
+}
+
+/* matches six lock types */
+enum btree_node_locked_type {
+ BTREE_NODE_UNLOCKED = -1,
+ BTREE_NODE_READ_LOCKED = SIX_LOCK_read,
+ BTREE_NODE_INTENT_LOCKED = SIX_LOCK_intent,
+ BTREE_NODE_WRITE_LOCKED = SIX_LOCK_write,
+};
+
+static inline int btree_node_locked_type(struct btree_path *path,
+ unsigned level)
+{
+ return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
+}
+
+static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
+{
+ return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
+}
+
+static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
+{
+ return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
+}
+
+static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
+{
+ return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
+}
+
+static inline bool btree_node_locked(struct btree_path *path, unsigned level)
+{
+ return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
+}
+
+static inline void mark_btree_node_locked_noreset(struct btree_path *path,
+ unsigned level,
+ enum btree_node_locked_type type)
+{
+ /* relying on this to avoid a branch */
+ BUILD_BUG_ON(SIX_LOCK_read != 0);
+ BUILD_BUG_ON(SIX_LOCK_intent != 1);
+
+ path->nodes_locked &= ~(3U << (level << 1));
+ path->nodes_locked |= (type + 1) << (level << 1);
+}
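+
+/*
+ * Worked example: nodes_locked packs one two-bit field per level, holding
+ * type + 1 so that 0 means unlocked. Taking an intent lock at level 2 sets
+ * (SIX_LOCK_intent + 1) << (2 << 1), i.e. nodes_locked |= 0x20;
+ * __ffs()/__fls() >> 1 then recover the lowest/highest locked level.
+ */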
+
+static inline void mark_btree_node_unlocked(struct btree_path *path,
+ unsigned level)
+{
+ EBUG_ON(btree_node_write_locked(path, level));
+ mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
+}
+
+static inline void mark_btree_node_locked(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned level,
+ enum btree_node_locked_type type)
+{
+ mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
+#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
+ path->l[level].lock_taken_time = local_clock();
+#endif
+}
+
+static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
+{
+ return level < path->locks_want
+ ? SIX_LOCK_intent
+ : SIX_LOCK_read;
+}
+
+static inline enum btree_node_locked_type
+btree_lock_want(struct btree_path *path, int level)
+{
+ if (level < path->level)
+ return BTREE_NODE_UNLOCKED;
+ if (level < path->locks_want)
+ return BTREE_NODE_INTENT_LOCKED;
+ if (level == path->level)
+ return BTREE_NODE_READ_LOCKED;
+ return BTREE_NODE_UNLOCKED;
+}
+
+static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
+ struct btree_transaction_stats *s = btree_trans_stats(trans);
+
+ if (s)
+ __bch2_time_stats_update(&s->lock_hold_times,
+ path->l[level].lock_taken_time,
+ local_clock());
+#endif
+}
+
+/* unlock: */
+
+static inline void btree_node_unlock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ int lock_type = btree_node_locked_type(path, level);
+
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+ if (lock_type != BTREE_NODE_UNLOCKED) {
+ six_unlock_type(&path->l[level].b->c.lock, lock_type);
+ btree_trans_lock_hold_time_update(trans, path, level);
+ }
+ mark_btree_node_unlocked(path, level);
+}
+
+static inline int btree_path_lowest_level_locked(struct btree_path *path)
+{
+ return __ffs(path->nodes_locked) >> 1;
+}
+
+static inline int btree_path_highest_level_locked(struct btree_path *path)
+{
+ return __fls(path->nodes_locked) >> 1;
+}
+
+static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);
+
+ while (path->nodes_locked)
+ btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
+}
+
+/*
+ * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
+ * succeed:
+ */
+static inline void
+bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
+ struct btree *b)
+{
+ struct btree_path *linked;
+
+ EBUG_ON(path->l[b->c.level].b != b);
+ EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
+ EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
+
+ mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+
+ trans_for_each_path_with_node(trans, b, linked)
+ linked->l[b->c.level].lock_seq++;
+
+ six_unlock_write(&b->c.lock);
+}
+
+void bch2_btree_node_unlock_write(struct btree_trans *,
+ struct btree_path *, struct btree *);
+
+int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);
+
+/* lock: */
+
+static inline int __btree_node_lock_nopath(struct btree_trans *trans,
+ struct btree_bkey_cached_common *b,
+ enum six_lock_type type,
+ bool lock_may_not_fail,
+ unsigned long ip)
+{
+ int ret;
+
+ trans->lock_may_not_fail = lock_may_not_fail;
+ trans->lock_must_abort = false;
+ trans->locking = b;
+
+ ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
+ bch2_six_check_for_deadlock, trans, ip);
+ WRITE_ONCE(trans->locking, NULL);
+ WRITE_ONCE(trans->locking_wait.start_time, 0);
+ return ret;
+}
+
+static inline int __must_check
+btree_node_lock_nopath(struct btree_trans *trans,
+ struct btree_bkey_cached_common *b,
+ enum six_lock_type type,
+ unsigned long ip)
+{
+ return __btree_node_lock_nopath(trans, b, type, false, ip);
+}
+
+static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
+ struct btree_bkey_cached_common *b,
+ enum six_lock_type type)
+{
+ int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);
+
+ BUG_ON(ret);
+}
+
+/*
+ * Lock a btree node if we already have it locked on one of our linked
+ * iterators:
+ */
+static inline bool btree_node_lock_increment(struct btree_trans *trans,
+ struct btree_bkey_cached_common *b,
+ unsigned level,
+ enum btree_node_locked_type want)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ if (&path->l[level].b->c == b &&
+ btree_node_locked_type(path, level) >= want) {
+ six_lock_increment(&b->lock, (enum six_lock_type) want);
+ return true;
+ }
+
+ return false;
+}
+
+static inline int btree_node_lock(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_bkey_cached_common *b,
+ unsigned level,
+ enum six_lock_type type,
+ unsigned long ip)
+{
+ int ret = 0;
+
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+ EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
+
+ if (likely(six_trylock_type(&b->lock, type)) ||
+ btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
+ !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
+#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
+ path->l[b->level].lock_taken_time = local_clock();
+#endif
+ }
+
+ return ret;
+}
+
+int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
+ struct btree_bkey_cached_common *b, bool);
+
+static inline int __btree_node_lock_write(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_bkey_cached_common *b,
+ bool lock_may_not_fail)
+{
+ EBUG_ON(&path->l[b->level].b->c != b);
+ EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
+ EBUG_ON(!btree_node_intent_locked(path, b->level));
+
+ /*
+ * six locks are unfair, and read locks block while a thread wants a
+ * write lock: thus, we need to tell the cycle detector we have a write
+ * lock _before_ taking the lock:
+ */
+ mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
+
+ return likely(six_trylock_write(&b->lock))
+ ? 0
+ : __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
+}
+
+static inline int __must_check
+bch2_btree_node_lock_write(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_bkey_cached_common *b)
+{
+ return __btree_node_lock_write(trans, path, b, false);
+}
+
+void bch2_btree_node_lock_write_nofail(struct btree_trans *,
+ struct btree_path *,
+ struct btree_bkey_cached_common *);
+
+/* relock: */
+
+bool bch2_btree_path_relock_norestart(struct btree_trans *,
+ struct btree_path *, unsigned long);
+int __bch2_btree_path_relock(struct btree_trans *,
+ struct btree_path *, unsigned long);
+
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned long trace_ip)
+{
+ return btree_node_locked(path, path->level)
+ ? 0
+ : __bch2_btree_path_relock(trans, path, trace_ip);
+}
+
+bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
+
+static inline bool bch2_btree_node_relock(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ EBUG_ON(btree_node_locked(path, level) &&
+ !btree_node_write_locked(path, level) &&
+ btree_node_locked_type(path, level) != __btree_lock_want(path, level));
+
+ return likely(btree_node_locked(path, level)) ||
+ (!IS_ERR_OR_NULL(path->l[level].b) &&
+ __bch2_btree_node_relock(trans, path, level, true));
+}
+
+static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
+ struct btree_path *path, unsigned level)
+{
+ EBUG_ON(btree_node_locked(path, level) &&
+ !btree_node_write_locked(path, level) &&
+ btree_node_locked_type(path, level) != __btree_lock_want(path, level));
+
+ return likely(btree_node_locked(path, level)) ||
+ (!IS_ERR_OR_NULL(path->l[level].b) &&
+ __bch2_btree_node_relock(trans, path, level, false));
+}
+
+/* upgrade */
+
+bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
+ struct btree_path *, unsigned);
+bool __bch2_btree_path_upgrade(struct btree_trans *,
+ struct btree_path *, unsigned);
+
+static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned new_locks_want)
+{
+ unsigned old_locks_want = path->locks_want;
+
+ new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
+
+ if (path->locks_want < new_locks_want
+ ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+ : path->uptodate == BTREE_ITER_UPTODATE)
+ return 0;
+
+ trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
+ old_locks_want, new_locks_want);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
+}
+
+/* misc: */
+
+static inline void btree_path_set_should_be_locked(struct btree_path *path)
+{
+ EBUG_ON(!btree_node_locked(path, path->level));
+ EBUG_ON(path->uptodate);
+
+ path->should_be_locked = true;
+}
+
+static inline void __btree_path_set_level_up(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned l)
+{
+ btree_node_unlock(trans, path, l);
+ path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
+}
+
+static inline void btree_path_set_level_up(struct btree_trans *trans,
+ struct btree_path *path)
+{
+ __btree_path_set_level_up(trans, path, path->level++);
+ btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
+}
+
+/* debug */
+
+struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
+ struct btree_path *,
+ struct btree_bkey_cached_common *b,
+ unsigned);
+
+int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_btree_path_verify_locks(struct btree_path *);
+void bch2_trans_verify_locks(struct btree_trans *);
+#else
+static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
+static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
+#endif
+
+#endif /* _BCACHEFS_BTREE_LOCKING_H */
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
new file mode 100644
index 000000000000..04c1f4610972
--- /dev/null
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -0,0 +1,1150 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_gc.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_journal_iter.h"
+#include "btree_key_cache.h"
+#include "btree_update_interior.h"
+#include "btree_write_buffer.h"
+#include "buckets.h"
+#include "errcode.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+#include "replicas.h"
+#include "snapshot.h"
+
+#include <linux/prefetch.h>
+
+static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct bch_fs *c = trans->c;
+ struct bkey u;
+ struct bkey_s_c k = bch2_btree_path_peek_slot_exact(i->path, &u);
+
+ if (unlikely(trans->journal_replay_not_finished)) {
+ struct bkey_i *j_k =
+ bch2_journal_keys_peek_slot(c, i->btree_id, i->level, i->k->k.p);
+
+ if (j_k)
+ k = bkey_i_to_s_c(j_k);
+ }
+
+ u = *k.k;
+ u.needs_whiteout = i->old_k.needs_whiteout;
+
+ BUG_ON(memcmp(&i->old_k, &u, sizeof(struct bkey)));
+ BUG_ON(i->old_v != k.v);
+#endif
+}
+
+static inline struct btree_path_level *insert_l(struct btree_insert_entry *i)
+{
+ return i->path->l + i->level;
+}
+
+static inline bool same_leaf_as_prev(struct btree_trans *trans,
+ struct btree_insert_entry *i)
+{
+ return i != trans->updates &&
+ insert_l(&i[0])->b == insert_l(&i[-1])->b;
+}
+
+static inline bool same_leaf_as_next(struct btree_trans *trans,
+ struct btree_insert_entry *i)
+{
+ return i + 1 < trans->updates + trans->nr_updates &&
+ insert_l(&i[0])->b == insert_l(&i[1])->b;
+}
+
+inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
+{
+ struct bch_fs *c = trans->c;
+
+ if (unlikely(btree_node_just_written(b)) &&
+ bch2_btree_post_write_cleanup(c, b))
+ bch2_trans_node_reinit_iter(trans, b);
+
+ /*
+ * If the last bset has been written, or if it's gotten too big - start
+ * a new bset to insert into:
+ */
+ if (want_new_bset(c, b))
+ bch2_btree_init_next(trans, b);
+}
+
+/* Inserting into a given leaf node (last stage of insert): */
+
+/* Handle overwrites and do insert, for non extents: */
+bool bch2_btree_bset_insert_key(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bkey_i *insert)
+{
+ struct bkey_packed *k;
+ unsigned clobber_u64s = 0, new_u64s = 0;
+
+ EBUG_ON(btree_node_just_written(b));
+ EBUG_ON(bset_written(b, btree_bset_last(b)));
+ EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
+ EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
+ EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
+ EBUG_ON(insert->k.u64s >
+ bch_btree_keys_u64s_remaining(trans->c, b));
+ EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));
+
+ k = bch2_btree_node_iter_peek_all(node_iter, b);
+ if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
+ k = NULL;
+
+ /* @k is the key being overwritten/deleted, if any: */
+ EBUG_ON(k && bkey_deleted(k));
+
+ /* Deleting, but not found? nothing to do: */
+ if (bkey_deleted(&insert->k) && !k)
+ return false;
+
+ if (bkey_deleted(&insert->k)) {
+ /* Deleting: */
+ btree_account_key_drop(b, k);
+ k->type = KEY_TYPE_deleted;
+
+ if (k->needs_whiteout)
+ push_whiteout(trans->c, b, insert->k.p);
+ k->needs_whiteout = false;
+
+ if (k >= btree_bset_last(b)->start) {
+ clobber_u64s = k->u64s;
+ bch2_bset_delete(b, k, clobber_u64s);
+ goto fix_iter;
+ } else {
+ bch2_btree_path_fix_key_modified(trans, b, k);
+ }
+
+ return true;
+ }
+
+ if (k) {
+ /* Overwriting: */
+ btree_account_key_drop(b, k);
+ k->type = KEY_TYPE_deleted;
+
+ insert->k.needs_whiteout = k->needs_whiteout;
+ k->needs_whiteout = false;
+
+ if (k >= btree_bset_last(b)->start) {
+ clobber_u64s = k->u64s;
+ goto overwrite;
+ } else {
+ bch2_btree_path_fix_key_modified(trans, b, k);
+ }
+ }
+
+ k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
+overwrite:
+ bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
+ new_u64s = k->u64s;
+fix_iter:
+ if (clobber_u64s != new_u64s)
+ bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
+ clobber_u64s, new_u64s);
+ return true;
+}
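+
+/*
+ * Note on whiteouts: deleting a key that was already written out in an
+ * earlier bset can't just vanish from memory - a whiteout has to survive so
+ * that the on-disk key is overridden when bsets are merged back together,
+ * which is what the needs_whiteout handling above preserves.
+ */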
+
+static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
+ unsigned i, u64 seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct btree_write *w = container_of(pin, struct btree_write, journal);
+ struct btree *b = container_of(w, struct btree, writes[i]);
+ struct btree_trans *trans = bch2_trans_get(c);
+ unsigned long old, new, v;
+ unsigned idx = w - b->writes;
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ v = READ_ONCE(b->flags);
+
+ do {
+ old = new = v;
+
+ if (!(old & (1 << BTREE_NODE_dirty)) ||
+ !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
+ w->journal.seq != seq)
+ break;
+
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ new |= BTREE_WRITE_journal_reclaim;
+ new |= 1 << BTREE_NODE_need_write;
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+ btree_node_write_if_need(c, b, SIX_LOCK_read);
+ six_unlock_read(&b->c.lock);
+
+ bch2_trans_put(trans);
+ return 0;
+}
+
+int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
+{
+ return __btree_node_flush(j, pin, 0, seq);
+}
+
+int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
+{
+ return __btree_node_flush(j, pin, 1, seq);
+}
+
+inline void bch2_btree_add_journal_pin(struct bch_fs *c,
+ struct btree *b, u64 seq)
+{
+ struct btree_write *w = btree_current_write(b);
+
+ bch2_journal_pin_add(&c->journal, seq, &w->journal,
+ btree_node_write_idx(b) == 0
+ ? bch2_btree_node_flush0
+ : bch2_btree_node_flush1);
+}
+
+/**
+ * bch2_btree_insert_key_leaf() - insert a key into a leaf node
+ * @trans: btree transaction object
+ * @path: path pointing to @insert's pos
+ * @insert: key to insert
+ * @journal_seq: sequence number of journal reservation
+ */
+inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
+ struct btree_path *path,
+ struct bkey_i *insert,
+ u64 journal_seq)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b = path_l(path)->b;
+ struct bset_tree *t = bset_tree_last(b);
+ struct bset *i = bset(b, t);
+ int old_u64s = bset_u64s(t);
+ int old_live_u64s = b->nr.live_u64s;
+ int live_u64s_added, u64s_added;
+
+ if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
+ &path_l(path)->iter, insert)))
+ return;
+
+ i->journal_seq = cpu_to_le64(max(journal_seq, le64_to_cpu(i->journal_seq)));
+
+ bch2_btree_add_journal_pin(c, b, journal_seq);
+
+ if (unlikely(!btree_node_dirty(b))) {
+ EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+ set_btree_node_dirty_acct(c, b);
+ }
+
+ live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
+ u64s_added = (int) bset_u64s(t) - old_u64s;
+
+ if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
+ b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
+ if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
+ b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
+
+ if (u64s_added > live_u64s_added &&
+ bch2_maybe_compact_whiteouts(c, b))
+ bch2_trans_node_reinit_iter(trans, b);
+}
+
+/* Cached btree updates: */
+
+/* Normal update interface: */
+
+static inline void btree_insert_entry_checks(struct btree_trans *trans,
+ struct btree_insert_entry *i)
+{
+ BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
+ BUG_ON(i->cached != i->path->cached);
+ BUG_ON(i->level != i->path->level);
+ BUG_ON(i->btree_id != i->path->btree_id);
+ EBUG_ON(!i->level &&
+ !(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
+ test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
+ i->k->k.p.snapshot &&
+ bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot));
+}
+
+static noinline int
+bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned flags,
+ unsigned long trace_ip)
+{
+ return drop_locks_do(trans,
+ bch2_journal_preres_get(&trans->c->journal,
+ &trans->journal_preres,
+ trans->journal_preres_u64s,
+ (flags & BCH_WATERMARK_MASK)));
+}
+
+static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
+ unsigned flags)
+{
+ return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
+ trans->journal_u64s, flags);
+}
+
+#define JSET_ENTRY_LOG_U64s 4
+
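+/*
+ * When the journal_transaction_names option is enabled, log the name of the
+ * function doing the commit to the journal, for debugging:
+ */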
+static noinline void journal_transaction_name(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct journal *j = &c->journal;
+ struct jset_entry *entry =
+ bch2_journal_add_entry(j, &trans->journal_res,
+ BCH_JSET_ENTRY_log, 0, 0,
+ JSET_ENTRY_LOG_U64s);
+ struct jset_entry_log *l =
+ container_of(entry, struct jset_entry_log, entry);
+
+ strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
+}
+
+static inline int btree_key_can_insert(struct btree_trans *trans,
+ struct btree *b, unsigned u64s)
+{
+ struct bch_fs *c = trans->c;
+
+ if (!bch2_btree_node_insert_fits(c, b, u64s))
+ return -BCH_ERR_btree_insert_btree_node_full;
+
+ return 0;
+}
+
+static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
+ struct btree_path *path, unsigned u64s)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck = (void *) path->l[0].b;
+ struct btree_insert_entry *i;
+ unsigned new_u64s;
+ struct bkey_i *new_k;
+
+ EBUG_ON(path->level);
+
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+ bch2_btree_key_cache_must_wait(c) &&
+ !(flags & BTREE_INSERT_JOURNAL_RECLAIM))
+ return -BCH_ERR_btree_insert_need_journal_reclaim;
+
+ /*
+ * bch2_varint_decode can read past the end of the buffer by at most 7
+ * bytes (it won't be used):
+ */
+ u64s += 1;
+
+ if (u64s <= ck->u64s)
+ return 0;
+
+ new_u64s = roundup_pow_of_two(u64s);
+ new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
+ if (!new_k) {
+ bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+ bch2_btree_ids[path->btree_id], new_u64s);
+ return -BCH_ERR_ENOMEM_btree_key_cache_insert;
+ }
+
+ trans_for_each_update(trans, i)
+ if (i->old_v == &ck->k->v)
+ i->old_v = &new_k->v;
+
+ ck->u64s = new_u64s;
+ ck->k = new_k;
+ return 0;
+}
+
+/* Triggers: */
+
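+/*
+ * Triggers come in two flavors: transactional triggers (->trans_trigger),
+ * run by bch2_trans_commit_run_triggers() before write locks are taken and
+ * which may generate more updates, and atomic triggers (->atomic_trigger),
+ * run via run_one_mem_trigger() with write locks held to update in-memory
+ * accounting:
+ */
+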
+static int run_one_mem_trigger(struct btree_trans *trans,
+ struct btree_insert_entry *i,
+ unsigned flags)
+{
+ struct bkey_s_c old = { &i->old_k, i->old_v };
+ struct bkey_i *new = i->k;
+ const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
+ const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
+ int ret;
+
+ verify_update_old_key(trans, i);
+
+ if (unlikely(flags & BTREE_TRIGGER_NORUN))
+ return 0;
+
+ if (!btree_node_type_needs_gc((enum btree_node_type) i->btree_id))
+ return 0;
+
+ if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
+ ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+ ret = bch2_mark_key(trans, i->btree_id, i->level,
+ old, bkey_i_to_s_c(new),
+ BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
+ } else {
+ struct bkey _deleted = KEY(0, 0, 0);
+ struct bkey_s_c deleted = (struct bkey_s_c) { &_deleted, NULL };
+
+ _deleted.p = i->path->pos;
+
+ ret = bch2_mark_key(trans, i->btree_id, i->level,
+ deleted, bkey_i_to_s_c(new),
+ BTREE_TRIGGER_INSERT|flags) ?:
+ bch2_mark_key(trans, i->btree_id, i->level,
+ old, deleted,
+ BTREE_TRIGGER_OVERWRITE|flags);
+ }
+
+ return ret;
+}
+
+static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
+ bool overwrite)
+{
+ /*
+ * Transactional triggers create new btree_insert_entries, so we can't
+ * pass them a pointer to a btree_insert_entry, that memory is going to
+ * move:
+ */
+ struct bkey old_k = i->old_k;
+ struct bkey_s_c old = { &old_k, i->old_v };
+ const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
+ const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
+
+ verify_update_old_key(trans, i);
+
+ if ((i->flags & BTREE_TRIGGER_NORUN) ||
+ !(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
+ return 0;
+
+ if (!i->insert_trigger_run &&
+ !i->overwrite_trigger_run &&
+ old_ops->trans_trigger == new_ops->trans_trigger &&
+ ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+ i->overwrite_trigger_run = true;
+ i->insert_trigger_run = true;
+ return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
+ BTREE_TRIGGER_INSERT|
+ BTREE_TRIGGER_OVERWRITE|
+ i->flags) ?: 1;
+ } else if (overwrite && !i->overwrite_trigger_run) {
+ i->overwrite_trigger_run = true;
+ return bch2_trans_mark_old(trans, i->btree_id, i->level, old, i->flags) ?: 1;
+ } else if (!overwrite && !i->insert_trigger_run) {
+ i->insert_trigger_run = true;
+ return bch2_trans_mark_new(trans, i->btree_id, i->level, i->k, i->flags) ?: 1;
+ } else {
+ return 0;
+ }
+}
+
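+/*
+ * Run transactional triggers for every update to @btree_id: since triggers
+ * may generate more updates, keep making passes over the list until a pass
+ * completes without any trigger having run:
+ */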
+static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
+ struct btree_insert_entry *btree_id_start)
+{
+ struct btree_insert_entry *i;
+ bool trans_trigger_run;
+ int ret, overwrite;
+
+ for (overwrite = 1; overwrite >= 0; --overwrite) {
+
+ /*
+ * Running triggers will append more updates to the list of updates as
+ * we're walking it:
+ */
+ do {
+ trans_trigger_run = false;
+
+ for (i = btree_id_start;
+ i < trans->updates + trans->nr_updates && i->btree_id <= btree_id;
+ i++) {
+ if (i->btree_id != btree_id)
+ continue;
+
+ ret = run_one_trans_trigger(trans, i, overwrite);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ trans_trigger_run = true;
+ }
+ } while (trans_trigger_run);
+ }
+
+ return 0;
+}
+
+static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
+{
+ struct btree_insert_entry *i = NULL, *btree_id_start = trans->updates;
+ unsigned btree_id = 0;
+ int ret = 0;
+
+	/*
+ * For a given btree, this algorithm runs insert triggers before
+ * overwrite triggers: this is so that when extents are being moved
+ * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
+ * they are re-added.
+ */
+ for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
+ if (btree_id == BTREE_ID_alloc)
+ continue;
+
+ while (btree_id_start < trans->updates + trans->nr_updates &&
+ btree_id_start->btree_id < btree_id)
+ btree_id_start++;
+
+ ret = run_btree_triggers(trans, btree_id, btree_id_start);
+ if (ret)
+ return ret;
+ }
+
+ trans_for_each_update(trans, i) {
+ if (i->btree_id > BTREE_ID_alloc)
+ break;
+ if (i->btree_id == BTREE_ID_alloc) {
+ ret = run_btree_triggers(trans, BTREE_ID_alloc, i);
+ if (ret)
+ return ret;
+ break;
+ }
+ }
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans_for_each_update(trans, i)
+ BUG_ON(!(i->flags & BTREE_TRIGGER_NORUN) &&
+ (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
+ (!i->insert_trigger_run || !i->overwrite_trigger_run));
+#endif
+ return 0;
+}
+
+static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+ int ret = 0;
+
+ trans_for_each_update(trans, i) {
+ /*
+ * XXX: synchronization of cached update triggers with gc
+ * XXX: synchronization of interior node updates with gc
+ */
+ BUG_ON(i->cached || i->level);
+
+ if (gc_visited(c, gc_pos_btree_node(insert_l(i)->b))) {
+ ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
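+/*
+ * The core of the commit path, called with write locks held on every btree
+ * node being modified: check that the keys fit, get the journal reservation,
+ * run atomic triggers and commit hooks, emit the journal entries, then do
+ * the btree and key cache updates themselves:
+ */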
+static inline int
+bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
+ struct btree_insert_entry **stopped_at,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+ struct btree_write_buffered_key *wb;
+ struct btree_trans_commit_hook *h;
+ unsigned u64s = 0;
+ int ret;
+
+ if (race_fault()) {
+ trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
+ return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
+ }
+
+ /*
+ * Check if the insert will fit in the leaf node with the write lock
+	 * held; otherwise another thread could write the node, changing the
+ * amount of space available:
+ */
+
+ prefetch(&trans->c->journal.flags);
+
+ trans_for_each_update(trans, i) {
+ /* Multiple inserts might go to same leaf: */
+ if (!same_leaf_as_prev(trans, i))
+ u64s = 0;
+
+ u64s += i->k->k.u64s;
+ ret = !i->cached
+ ? btree_key_can_insert(trans, insert_l(i)->b, u64s)
+ : btree_key_can_insert_cached(trans, flags, i->path, u64s);
+ if (ret) {
+ *stopped_at = i;
+ return ret;
+ }
+ }
+
+ if (trans->nr_wb_updates &&
+ trans->nr_wb_updates + c->btree_write_buffer.state.nr > c->btree_write_buffer.size)
+ return -BCH_ERR_btree_insert_need_flush_buffer;
+
+ /*
+ * Don't get journal reservation until after we know insert will
+ * succeed:
+ */
+ if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ ret = bch2_trans_journal_res_get(trans,
+ (flags & BCH_WATERMARK_MASK)|
+ JOURNAL_RES_GET_NONBLOCK);
+ if (ret)
+ return ret;
+
+ if (unlikely(trans->journal_transaction_names))
+ journal_transaction_name(trans);
+ } else {
+ trans->journal_res.seq = c->journal.replay_journal_seq;
+ }
+
+ /*
+ * Not allowed to fail after we've gotten our journal reservation - we
+ * have to use it:
+ */
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+ !(flags & BTREE_INSERT_JOURNAL_REPLAY)) {
+ if (bch2_journal_seq_verify)
+ trans_for_each_update(trans, i)
+ i->k->k.version.lo = trans->journal_res.seq;
+ else if (bch2_inject_invalid_keys)
+ trans_for_each_update(trans, i)
+ i->k->k.version = MAX_VERSION;
+ }
+
+ if (trans->fs_usage_deltas &&
+ bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
+ return -BCH_ERR_btree_insert_need_mark_replicas;
+
+ if (trans->nr_wb_updates) {
+ EBUG_ON(flags & BTREE_INSERT_JOURNAL_REPLAY);
+
+ ret = bch2_btree_insert_keys_write_buffer(trans);
+ if (ret)
+ goto revert_fs_usage;
+ }
+
+ h = trans->hooks;
+ while (h) {
+ ret = h->fn(trans, h);
+ if (ret)
+ goto revert_fs_usage;
+ h = h->next;
+ }
+
+ trans_for_each_update(trans, i)
+ if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
+ ret = run_one_mem_trigger(trans, i, i->flags);
+ if (ret)
+ goto fatal_err;
+ }
+
+ if (unlikely(c->gc_pos.phase)) {
+ ret = bch2_trans_commit_run_gc_triggers(trans);
+ if (ret)
+ goto fatal_err;
+ }
+
+ if (unlikely(trans->extra_journal_entries.nr)) {
+ memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
+ trans->extra_journal_entries.data,
+ trans->extra_journal_entries.nr);
+
+ trans->journal_res.offset += trans->extra_journal_entries.nr;
+ trans->journal_res.u64s -= trans->extra_journal_entries.nr;
+ }
+
+ if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ struct journal *j = &c->journal;
+ struct jset_entry *entry;
+
+ trans_for_each_update(trans, i) {
+ if (i->key_cache_already_flushed)
+ continue;
+
+ if (i->flags & BTREE_UPDATE_NOJOURNAL)
+ continue;
+
+ verify_update_old_key(trans, i);
+
+ if (trans->journal_transaction_names) {
+ entry = bch2_journal_add_entry(j, &trans->journal_res,
+ BCH_JSET_ENTRY_overwrite,
+ i->btree_id, i->level,
+ i->old_k.u64s);
+ bkey_reassemble(&entry->start[0],
+ (struct bkey_s_c) { &i->old_k, i->old_v });
+ }
+
+ entry = bch2_journal_add_entry(j, &trans->journal_res,
+ BCH_JSET_ENTRY_btree_keys,
+ i->btree_id, i->level,
+ i->k->k.u64s);
+ bkey_copy(&entry->start[0], i->k);
+ }
+
+ trans_for_each_wb_update(trans, wb) {
+ entry = bch2_journal_add_entry(j, &trans->journal_res,
+ BCH_JSET_ENTRY_btree_keys,
+ wb->btree, 0,
+ wb->k.k.u64s);
+ bkey_copy(&entry->start[0], &wb->k);
+ }
+
+ if (trans->journal_seq)
+ *trans->journal_seq = trans->journal_res.seq;
+ }
+
+ trans_for_each_update(trans, i) {
+ i->k->k.needs_whiteout = false;
+
+ if (!i->cached) {
+ u64 seq = trans->journal_res.seq;
+
+ if (i->flags & BTREE_UPDATE_PREJOURNAL)
+ seq = i->seq;
+
+ bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
+ } else if (!i->key_cache_already_flushed)
+ bch2_btree_insert_key_cached(trans, flags, i);
+ else {
+ bch2_btree_key_cache_drop(trans, i->path);
+ btree_path_set_dirty(i->path, BTREE_ITER_NEED_TRAVERSE);
+ }
+ }
+
+ return 0;
+fatal_err:
+ bch2_fatal_error(c);
+revert_fs_usage:
+ if (trans->fs_usage_deltas)
+ bch2_trans_fs_usage_revert(trans, trans->fs_usage_deltas);
+ return ret;
+}
+
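+/* Unwind the write locks taken so far, then restart the transaction: */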
+static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
+{
+ while (--i >= trans->updates) {
+ if (same_leaf_as_prev(trans, i))
+ continue;
+
+ bch2_btree_node_unlock_write(trans, i->path, insert_l(i)->b);
+ }
+
+ trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
+}
+
+static inline int trans_lock_write(struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i) {
+ if (same_leaf_as_prev(trans, i))
+ continue;
+
+ if (bch2_btree_node_lock_write(trans, i->path, &insert_l(i)->b->c))
+ return trans_lock_write_fail(trans, i);
+
+ if (!i->cached)
+ bch2_btree_node_prep_for_write(trans, i->path, insert_l(i)->b);
+ }
+
+ return 0;
+}
+
+static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+ struct btree_write_buffered_key *wb;
+
+ trans_for_each_update(trans, i)
+ bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
+
+ trans_for_each_wb_update(trans, wb)
+ bch2_journal_key_overwritten(trans->c, wb->btree, 0, wb->k.k.p);
+}
+
+static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans, unsigned flags,
+ struct btree_insert_entry *i,
+ struct printbuf *err)
+{
+ struct bch_fs *c = trans->c;
+ int rw = (flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
+
+ printbuf_reset(err);
+ prt_printf(err, "invalid bkey on insert from %s -> %ps",
+ trans->fn, (void *) i->ip_allocated);
+ prt_newline(err);
+ printbuf_indent_add(err, 2);
+
+ bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
+ prt_newline(err);
+
+ bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, rw, err);
+ bch2_print_string_as_lines(KERN_ERR, err->buf);
+
+ bch2_inconsistent_error(c);
+ bch2_dump_trans_updates(trans);
+
+ return -EINVAL;
+}
+
+/*
+ * Get journal reservation, take write locks, and attempt to do btree update(s):
+ */
+static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
+ struct btree_insert_entry **stopped_at,
+ unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+ int ret = 0, u64s_delta = 0;
+
+ trans_for_each_update(trans, i) {
+ if (i->cached)
+ continue;
+
+ u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
+ u64s_delta -= i->old_btree_u64s;
+
+ if (!same_leaf_as_next(trans, i)) {
+ if (u64s_delta <= 0) {
+ ret = bch2_foreground_maybe_merge(trans, i->path,
+ i->level, flags);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ u64s_delta = 0;
+ }
+ }
+
+ ret = bch2_journal_preres_get(&c->journal,
+ &trans->journal_preres, trans->journal_preres_u64s,
+ (flags & BCH_WATERMARK_MASK)|JOURNAL_RES_GET_NONBLOCK);
+ if (unlikely(ret == -BCH_ERR_journal_preres_get_blocked))
+ ret = bch2_trans_journal_preres_get_cold(trans, flags, trace_ip);
+ if (unlikely(ret))
+ return ret;
+
+ ret = trans_lock_write(trans);
+ if (unlikely(ret))
+ return ret;
+
+ ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);
+
+ if (!ret && unlikely(trans->journal_replay_not_finished))
+ bch2_drop_overwrites_from_journal(trans);
+
+ trans_for_each_update(trans, i)
+ if (!same_leaf_as_prev(trans, i))
+ bch2_btree_node_unlock_write_inlined(trans, i->path,
+ insert_l(i)->b);
+
+ if (!ret && trans->journal_pin)
+ bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
+ trans->journal_pin, NULL);
+
+ /*
+ * Drop journal reservation after dropping write locks, since dropping
+ * the journal reservation may kick off a journal write:
+ */
+ bch2_journal_res_put(&c->journal, &trans->journal_res);
+
+ if (unlikely(ret))
+ return ret;
+
+ bch2_trans_downgrade(trans);
+
+ return 0;
+}
+
+static int journal_reclaim_wait_done(struct bch_fs *c)
+{
+ int ret = bch2_journal_error(&c->journal) ?:
+ !bch2_btree_key_cache_must_wait(c);
+
+ if (!ret)
+ journal_reclaim_kick(&c->journal);
+ return ret;
+}
+
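+/*
+ * Handle a commit failure: depending on the error, split the btree node,
+ * mark replicas, wait on the journal or on journal reclaim, or flush the
+ * btree write buffer, so that the commit can be retried:
+ */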
+static noinline
+int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
+ struct btree_insert_entry *i,
+ int ret, unsigned long trace_ip)
+{
+ struct bch_fs *c = trans->c;
+
+ switch (ret) {
+ case -BCH_ERR_btree_insert_btree_node_full:
+ ret = bch2_btree_split_leaf(trans, i->path, flags);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
+ break;
+ case -BCH_ERR_btree_insert_need_mark_replicas:
+ ret = drop_locks_do(trans,
+ bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas));
+ break;
+ case -BCH_ERR_journal_res_get_blocked:
+ /*
+ * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
+ * flag
+ */
+ if ((flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
+ (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) {
+ ret = -BCH_ERR_journal_reclaim_would_deadlock;
+ break;
+ }
+
+ ret = drop_locks_do(trans,
+ bch2_trans_journal_res_get(trans,
+ (flags & BCH_WATERMARK_MASK)|
+ JOURNAL_RES_GET_CHECK));
+ break;
+ case -BCH_ERR_btree_insert_need_journal_reclaim:
+ bch2_trans_unlock(trans);
+
+ trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
+
+ wait_event_freezable(c->journal.reclaim_wait,
+ (ret = journal_reclaim_wait_done(c)));
+ if (ret < 0)
+ break;
+
+ ret = bch2_trans_relock(trans);
+ break;
+ case -BCH_ERR_btree_insert_need_flush_buffer: {
+ struct btree_write_buffer *wb = &c->btree_write_buffer;
+
+ ret = 0;
+
+ if (wb->state.nr > wb->size * 3 / 4) {
+ bch2_trans_unlock(trans);
+ mutex_lock(&wb->flush_lock);
+
+ if (wb->state.nr > wb->size * 3 / 4) {
+ bch2_trans_begin(trans);
+ ret = __bch2_btree_write_buffer_flush(trans,
+ flags|BTREE_INSERT_NOCHECK_RW, true);
+ if (!ret) {
+ trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
+ }
+ } else {
+ mutex_unlock(&wb->flush_lock);
+ ret = bch2_trans_relock(trans);
+ }
+ }
+ break;
+ }
+ default:
+ BUG_ON(ret >= 0);
+ break;
+ }
+
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
+
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
+ !(flags & BTREE_INSERT_NOWAIT) &&
+ (flags & BTREE_INSERT_NOFAIL), c,
+ "%s: incorrectly got %s\n", __func__, bch2_err_str(ret));
+
+ return ret;
+}
+
+static noinline int
+bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ int ret;
+
+ if (likely(!(flags & BTREE_INSERT_LAZY_RW)) ||
+ test_bit(BCH_FS_STARTED, &c->flags))
+ return -BCH_ERR_erofs_trans_commit;
+
+ ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
+ if (ret)
+ return ret;
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_trans);
+ return 0;
+}
+
+/*
+ * This is for updates done in the early part of fsck - btree_gc - before we've
+ * gone RW: we only add the new key to the list of keys for journal replay to
+ * do.
+ */
+static noinline int
+do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i;
+ int ret = 0;
+
+ trans_for_each_update(trans, i) {
+ ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
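+/*
+ * Typical caller pattern, as a sketch (assuming the bch2_trans_commit()
+ * wrapper declared in btree_update.h): queue updates, commit, and retry from
+ * the top on transaction restart:
+ *
+ *	do {
+ *		bch2_trans_begin(trans);
+ *
+ *		ret =   bch2_btree_iter_traverse(&iter) ?:
+ *			bch2_trans_update(trans, &iter, k, 0) ?:
+ *			bch2_trans_commit(trans, NULL, NULL, 0);
+ *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
+ */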
+int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i = NULL;
+ struct btree_write_buffered_key *wb;
+ unsigned u64s;
+ int ret = 0;
+
+ if (!trans->nr_updates &&
+ !trans->nr_wb_updates &&
+ !trans->extra_journal_entries.nr)
+ goto out_reset;
+
+ if (flags & BTREE_INSERT_GC_LOCK_HELD)
+ lockdep_assert_held(&c->gc_lock);
+
+ ret = bch2_trans_commit_run_triggers(trans);
+ if (ret)
+ goto out_reset;
+
+ trans_for_each_update(trans, i) {
+ struct printbuf buf = PRINTBUF;
+ enum bkey_invalid_flags invalid_flags = 0;
+
+ if (!(flags & BTREE_INSERT_JOURNAL_REPLAY))
+ invalid_flags |= BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT;
+
+ if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, invalid_flags, &buf)))
+ ret = bch2_trans_commit_bkey_invalid(trans, flags, i, &buf);
+ btree_insert_entry_checks(trans, i);
+ printbuf_exit(&buf);
+
+ if (ret)
+ return ret;
+ }
+
+ if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ ret = do_bch2_trans_commit_to_journal_replay(trans);
+ goto out_reset;
+ }
+
+ if (!(flags & BTREE_INSERT_NOCHECK_RW) &&
+ unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) {
+ ret = bch2_trans_commit_get_rw_cold(trans, flags);
+ if (ret)
+ goto out_reset;
+ }
+
+ if (c->btree_write_buffer.state.nr > c->btree_write_buffer.size / 2 &&
+ mutex_trylock(&c->btree_write_buffer.flush_lock)) {
+ bch2_trans_begin(trans);
+ bch2_trans_unlock(trans);
+
+ ret = __bch2_btree_write_buffer_flush(trans,
+ flags|BTREE_INSERT_NOCHECK_RW, true);
+ if (!ret) {
+ trace_and_count(c, trans_restart_write_buffer_flush, trans, _THIS_IP_);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_write_buffer_flush);
+ }
+ goto out;
+ }
+
+ EBUG_ON(test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags));
+
+ memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
+
+ trans->journal_u64s = trans->extra_journal_entries.nr;
+ trans->journal_preres_u64s = 0;
+
+ trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
+
+ if (trans->journal_transaction_names)
+ trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);
+
+ trans_for_each_update(trans, i) {
+ EBUG_ON(!i->path->should_be_locked);
+
+ ret = bch2_btree_path_upgrade(trans, i->path, i->level + 1);
+ if (unlikely(ret))
+ goto out;
+
+ EBUG_ON(!btree_node_intent_locked(i->path, i->level));
+
+ if (i->key_cache_already_flushed)
+ continue;
+
+ /* we're going to journal the key being updated: */
+ u64s = jset_u64s(i->k->k.u64s);
+ if (i->cached &&
+ likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY)))
+ trans->journal_preres_u64s += u64s;
+
+ if (i->flags & BTREE_UPDATE_NOJOURNAL)
+ continue;
+
+ trans->journal_u64s += u64s;
+
+ /* and we're also going to log the overwrite: */
+ if (trans->journal_transaction_names)
+ trans->journal_u64s += jset_u64s(i->old_k.u64s);
+ }
+
+ trans_for_each_wb_update(trans, wb)
+ trans->journal_u64s += jset_u64s(wb->k.k.u64s);
+
+ if (trans->extra_journal_res) {
+ ret = bch2_disk_reservation_add(c, trans->disk_res,
+ trans->extra_journal_res,
+ (flags & BTREE_INSERT_NOFAIL)
+ ? BCH_DISK_RESERVATION_NOFAIL : 0);
+ if (ret)
+ goto err;
+ }
+retry:
+ bch2_trans_verify_not_in_restart(trans);
+ memset(&trans->journal_res, 0, sizeof(trans->journal_res));
+
+ ret = do_bch2_trans_commit(trans, flags, &i, _RET_IP_);
+
+ /* make sure we didn't drop or screw up locks: */
+ bch2_trans_verify_locks(trans);
+
+ if (ret)
+ goto err;
+
+ trace_and_count(c, transaction_commit, trans, _RET_IP_);
+out:
+ bch2_journal_preres_put(&c->journal, &trans->journal_preres);
+
+ if (likely(!(flags & BTREE_INSERT_NOCHECK_RW)))
+ bch2_write_ref_put(c, BCH_WRITE_REF_trans);
+out_reset:
+ bch2_trans_reset_updates(trans);
+
+ return ret;
+err:
+ ret = bch2_trans_commit_error(trans, flags, i, ret, _RET_IP_);
+ if (ret)
+ goto out;
+
+ goto retry;
+}
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
new file mode 100644
index 000000000000..c9a38e254949
--- /dev/null
+++ b/fs/bcachefs/btree_types.h
@@ -0,0 +1,739 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_TYPES_H
+#define _BCACHEFS_BTREE_TYPES_H
+
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+
+//#include "bkey_methods.h"
+#include "buckets_types.h"
+#include "darray.h"
+#include "errcode.h"
+#include "journal_types.h"
+#include "replicas_types.h"
+#include "six.h"
+
+struct open_bucket;
+struct btree_update;
+struct btree_trans;
+
+#define MAX_BSETS 3U
+
+struct btree_nr_keys {
+
+ /*
+ * Amount of live metadata (i.e. size of node after a compaction) in
+ * units of u64s
+ */
+ u16 live_u64s;
+ u16 bset_u64s[MAX_BSETS];
+
+ /* live keys only: */
+ u16 packed_keys;
+ u16 unpacked_keys;
+};
+
+struct bset_tree {
+ /*
+ * We construct a binary tree in an array as if the array
+ * started at 1, so that things line up on the same cachelines
+ * better: see comments in bset.c at cacheline_to_bkey() for
+ * details
+ */
+
+ /* size of the binary tree and prev array */
+ u16 size;
+
+ /* function of size - precalculated for to_inorder() */
+ u16 extra;
+
+ u16 data_offset;
+ u16 aux_data_offset;
+ u16 end_offset;
+};
+
+struct btree_write {
+ struct journal_entry_pin journal;
+};
+
+struct btree_alloc {
+ struct open_buckets ob;
+ __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX);
+};
+
+struct btree_bkey_cached_common {
+ struct six_lock lock;
+ u8 level;
+ u8 btree_id;
+ bool cached;
+};
+
+struct btree {
+ struct btree_bkey_cached_common c;
+
+ struct rhash_head hash;
+ u64 hash_val;
+
+ unsigned long flags;
+ u16 written;
+ u8 nsets;
+ u8 nr_key_bits;
+ u16 version_ondisk;
+
+ struct bkey_format format;
+
+ struct btree_node *data;
+ void *aux_data;
+
+ /*
+ * Sets of sorted keys - the real btree node - plus a binary search tree
+ *
+ * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
+ * to the memory we have allocated for this btree node. Additionally,
+ * set[0]->data points to the entire btree node as it exists on disk.
+ */
+ struct bset_tree set[MAX_BSETS];
+
+ struct btree_nr_keys nr;
+ u16 sib_u64s[2];
+ u16 whiteout_u64s;
+ u8 byte_order;
+ u8 unpack_fn_len;
+
+ struct btree_write writes[2];
+
+ /* Key/pointer for this btree node */
+ __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+
+ /*
+ * XXX: add a delete sequence number, so when bch2_btree_node_relock()
+ * fails because the lock sequence number has changed - i.e. the
+ * contents were modified - we can still relock the node if it's still
+ * the one we want, without redoing the traversal
+ */
+
+ /*
+ * For asynchronous splits/interior node updates:
+ * When we do a split, we allocate new child nodes and update the parent
+ * node to point to them: we update the parent in memory immediately,
+ * but then we must wait until the children have been written out before
+ * the update to the parent can be written - this is a list of the
+ * btree_updates that are blocking this node from being
+ * written:
+ */
+ struct list_head write_blocked;
+
+ /*
+ * Also for asynchronous splits/interior node updates:
+ * If a btree node isn't reachable yet, we don't want to kick off
+ * another write - because that write also won't yet be reachable and
+ * marking it as completed before it's reachable would be incorrect:
+ */
+ unsigned long will_make_reachable;
+
+ struct open_buckets ob;
+
+ /* lru list */
+ struct list_head list;
+};
+
+struct btree_cache {
+ struct rhashtable table;
+ bool table_init_done;
+ /*
+ * We never free a struct btree, except on shutdown - we just put it on
+ * the btree_cache_freed list and reuse it later. This simplifies the
+ * code, and it doesn't cost us much memory as the memory usage is
+ * dominated by buffers that hold the actual btree node data and those
+ * can be freed - and the number of struct btrees allocated is
+ * effectively bounded.
+ *
+ * btree_cache_freeable effectively is a small cache - we use it because
+ * high order page allocations can be rather expensive, and it's quite
+ * common to delete and allocate btree nodes in quick succession. It
+ * should never grow past ~2-3 nodes in practice.
+ */
+ struct mutex lock;
+ struct list_head live;
+ struct list_head freeable;
+ struct list_head freed_pcpu;
+ struct list_head freed_nonpcpu;
+
+ /* Number of elements in live + freeable lists */
+ unsigned used;
+ unsigned reserve;
+ atomic_t dirty;
+ struct shrinker shrink;
+
+ /*
+ * If we need to allocate memory for a new btree node and that
+ * allocation fails, we can cannibalize another node in the btree cache
+ * to satisfy the allocation - lock to guarantee only one thread does
+ * this at a time:
+ */
+ struct task_struct *alloc_lock;
+ struct closure_waitlist alloc_wait;
+};
+
+struct btree_node_iter {
+ struct btree_node_iter_set {
+ u16 k, end;
+ } data[MAX_BSETS];
+};
+
+/*
+ * Iterate over all possible positions, synthesizing deleted keys for holes:
+ */
+static const __maybe_unused u16 BTREE_ITER_SLOTS = 1 << 0;
+static const __maybe_unused u16 BTREE_ITER_ALL_LEVELS = 1 << 1;
+/*
+ * Indicates that intent locks should be taken on leaf nodes, because we expect
+ * to be doing updates:
+ */
+static const __maybe_unused u16 BTREE_ITER_INTENT = 1 << 2;
+/*
+ * Causes the btree iterator code to prefetch additional btree nodes from disk:
+ */
+static const __maybe_unused u16 BTREE_ITER_PREFETCH = 1 << 3;
+/*
+ * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
+ * @pos or the first key strictly greater than @pos
+ */
+static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS = 1 << 4;
+static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS = 1 << 5;
+static const __maybe_unused u16 BTREE_ITER_CACHED = 1 << 6;
+static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE = 1 << 7;
+static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES = 1 << 8;
+static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL = 1 << 9;
+static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS = 1 << 10;
+static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS = 1 << 11;
+static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS = 1 << 12;
+static const __maybe_unused u16 BTREE_ITER_NOPRESERVE = 1 << 13;
+static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL = 1 << 14;
+static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL = 1 << 15;
+#define __BTREE_ITER_FLAGS_END 16
+
+enum btree_path_uptodate {
+ BTREE_ITER_UPTODATE = 0,
+ BTREE_ITER_NEED_RELOCK = 1,
+ BTREE_ITER_NEED_TRAVERSE = 2,
+};
+
+#if defined(CONFIG_BCACHEFS_LOCK_TIME_STATS) || defined(CONFIG_BCACHEFS_DEBUG)
+#define TRACK_PATH_ALLOCATED
+#endif
+
+struct btree_path {
+ u8 idx;
+ u8 sorted_idx;
+ u8 ref;
+ u8 intent_ref;
+
+ /* btree_iter_copy starts here: */
+ struct bpos pos;
+
+ enum btree_id btree_id:5;
+ bool cached:1;
+ bool preserve:1;
+ enum btree_path_uptodate uptodate:2;
+ /*
+ * When true, failing to relock this path will cause the transaction to
+ * restart:
+ */
+ bool should_be_locked:1;
+ unsigned level:3,
+ locks_want:3;
+ u8 nodes_locked;
+
+ struct btree_path_level {
+ struct btree *b;
+ struct btree_node_iter iter;
+ u32 lock_seq;
+#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
+ u64 lock_taken_time;
+#endif
+ } l[BTREE_MAX_DEPTH];
+#ifdef TRACK_PATH_ALLOCATED
+ unsigned long ip_allocated;
+#endif
+};
+
+static inline struct btree_path_level *path_l(struct btree_path *path)
+{
+ return path->l + path->level;
+}
+
+static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
+{
+#ifdef TRACK_PATH_ALLOCATED
+ return path->ip_allocated;
+#else
+ return _THIS_IP_;
+#endif
+}
+
+/*
+ * @pos - iterator's current position
+ *
+ * The btree level, locks_want and per-node lock state that used to live here
+ * are tracked in struct btree_path, above.
+ */
+struct btree_iter {
+ struct btree_trans *trans;
+ struct btree_path *path;
+ struct btree_path *update_path;
+ struct btree_path *key_cache_path;
+
+ enum btree_id btree_id:8;
+ unsigned min_depth:3;
+ unsigned advanced:1;
+
+ /* btree_iter_copy starts here: */
+ u16 flags;
+
+ /* When we're filtering by snapshot, the snapshot ID we're looking for: */
+ unsigned snapshot;
+
+ struct bpos pos;
+ /*
+ * Current unpacked key - so that bch2_btree_iter_next()/
+ * bch2_btree_iter_next_slot() can correctly advance pos.
+ */
+ struct bkey k;
+
+ /* BTREE_ITER_WITH_JOURNAL: */
+ size_t journal_idx;
+ struct bpos journal_pos;
+#ifdef TRACK_PATH_ALLOCATED
+ unsigned long ip_allocated;
+#endif
+};
+
+struct btree_key_cache_freelist {
+ struct bkey_cached *objs[16];
+ unsigned nr;
+};
+
+struct btree_key_cache {
+ struct mutex lock;
+ struct rhashtable table;
+ bool table_init_done;
+ struct list_head freed_pcpu;
+ struct list_head freed_nonpcpu;
+ struct shrinker shrink;
+ unsigned shrink_iter;
+ struct btree_key_cache_freelist __percpu *pcpu_freed;
+
+ atomic_long_t nr_freed;
+ atomic_long_t nr_keys;
+ atomic_long_t nr_dirty;
+};
+
+struct bkey_cached_key {
+ u32 btree_id;
+ struct bpos pos;
+} __packed __aligned(4);
+
+#define BKEY_CACHED_ACCESSED 0
+#define BKEY_CACHED_DIRTY 1
+
+struct bkey_cached {
+ struct btree_bkey_cached_common c;
+
+ unsigned long flags;
+ u16 u64s;
+ bool valid;
+ u32 btree_trans_barrier_seq;
+ struct bkey_cached_key key;
+
+ struct rhash_head hash;
+ struct list_head list;
+
+ struct journal_preres res;
+ struct journal_entry_pin journal;
+ u64 seq;
+
+ struct bkey_i *k;
+};
+
+static inline struct bpos btree_node_pos(struct btree_bkey_cached_common *b)
+{
+ return !b->cached
+ ? container_of(b, struct btree, c)->key.k.p
+ : container_of(b, struct bkey_cached, c)->key.pos;
+}
+
+struct btree_insert_entry {
+ unsigned flags;
+ u8 bkey_type;
+ enum btree_id btree_id:8;
+ u8 level:4;
+ bool cached:1;
+ bool insert_trigger_run:1;
+ bool overwrite_trigger_run:1;
+ bool key_cache_already_flushed:1;
+ /*
+ * @old_k may be a key from the journal; @old_btree_u64s always refers
+ * to the size of the key being overwritten in the btree:
+ */
+ u8 old_btree_u64s;
+ struct bkey_i *k;
+ struct btree_path *path;
+ u64 seq;
+ /* key being overwritten: */
+ struct bkey old_k;
+ const struct bch_val *old_v;
+ unsigned long ip_allocated;
+};
+
+#ifndef CONFIG_LOCKDEP
+#define BTREE_ITER_MAX 64
+#else
+#define BTREE_ITER_MAX 32
+#endif
+
+struct btree_trans_commit_hook;
+typedef int (btree_trans_commit_hook_fn)(struct btree_trans *, struct btree_trans_commit_hook *);
+
+struct btree_trans_commit_hook {
+ btree_trans_commit_hook_fn *fn;
+ struct btree_trans_commit_hook *next;
+};
+
+#define BTREE_TRANS_MEM_MAX (1U << 16)
+
+#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS 10000
+
+struct btree_trans {
+ struct bch_fs *c;
+ const char *fn;
+ struct closure ref;
+ struct list_head list;
+ u64 last_begin_time;
+
+ u8 lock_may_not_fail;
+ u8 lock_must_abort;
+ struct btree_bkey_cached_common *locking;
+ struct six_lock_waiter locking_wait;
+
+ int srcu_idx;
+
+ u8 fn_idx;
+ u8 nr_sorted;
+ u8 nr_updates;
+ u8 nr_wb_updates;
+ u8 wb_updates_size;
+ bool used_mempool:1;
+ bool in_traverse_all:1;
+ bool paths_sorted:1;
+ bool memory_allocation_failure:1;
+ bool journal_transaction_names:1;
+ bool journal_replay_not_finished:1;
+ bool notrace_relock_fail:1;
+ enum bch_errcode restarted:16;
+ u32 restart_count;
+ unsigned long last_begin_ip;
+ unsigned long last_restarted_ip;
+ unsigned long srcu_lock_time;
+
+ /*
+ * For when bch2_trans_update notices we'll be splitting a compressed
+ * extent:
+ */
+ unsigned extra_journal_res;
+ unsigned nr_max_paths;
+
+ u64 paths_allocated;
+
+ unsigned mem_top;
+ unsigned mem_max;
+ unsigned mem_bytes;
+ void *mem;
+
+ u8 sorted[BTREE_ITER_MAX + 8];
+ struct btree_path paths[BTREE_ITER_MAX];
+ struct btree_insert_entry updates[BTREE_ITER_MAX];
+ struct btree_write_buffered_key *wb_updates;
+
+ /* update path: */
+ struct btree_trans_commit_hook *hooks;
+ darray_u64 extra_journal_entries;
+ struct journal_entry_pin *journal_pin;
+
+ struct journal_res journal_res;
+ struct journal_preres journal_preres;
+ u64 *journal_seq;
+ struct disk_reservation *disk_res;
+ unsigned journal_u64s;
+ unsigned journal_preres_u64s;
+ struct replicas_delta_list *fs_usage_deltas;
+};
+
+#define BCH_BTREE_WRITE_TYPES() \
+ x(initial, 0) \
+ x(init_next_bset, 1) \
+ x(cache_reclaim, 2) \
+ x(journal_reclaim, 3) \
+ x(interior, 4)
+
+enum btree_write_type {
+#define x(t, n) BTREE_WRITE_##t,
+ BCH_BTREE_WRITE_TYPES()
+#undef x
+ BTREE_WRITE_TYPE_NR,
+};
+
+#define BTREE_WRITE_TYPE_MASK (roundup_pow_of_two(BTREE_WRITE_TYPE_NR) - 1)
+#define BTREE_WRITE_TYPE_BITS ilog2(roundup_pow_of_two(BTREE_WRITE_TYPE_NR))
+
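+/*
+ * The low BTREE_WRITE_TYPE_BITS bits of a btree node's flags word hold the
+ * write type; the node flags declared below start immediately above them:
+ */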
+#define BTREE_FLAGS() \
+ x(read_in_flight) \
+ x(read_error) \
+ x(dirty) \
+ x(need_write) \
+ x(write_blocked) \
+ x(will_make_reachable) \
+ x(noevict) \
+ x(write_idx) \
+ x(accessed) \
+ x(write_in_flight) \
+ x(write_in_flight_inner) \
+ x(just_written) \
+ x(dying) \
+ x(fake) \
+ x(need_rewrite) \
+ x(never_write)
+
+enum btree_flags {
+ /* First bits for btree node write type */
+ BTREE_NODE_FLAGS_START = BTREE_WRITE_TYPE_BITS - 1,
+#define x(flag) BTREE_NODE_##flag,
+ BTREE_FLAGS()
+#undef x
+};
+
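+/*
+ * For each flag, generate btree_node_<flag>(), set_btree_node_<flag>() and
+ * clear_btree_node_<flag>() helpers:
+ */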
+#define x(flag) \
+static inline bool btree_node_ ## flag(struct btree *b) \
+{ return test_bit(BTREE_NODE_ ## flag, &b->flags); } \
+ \
+static inline void set_btree_node_ ## flag(struct btree *b) \
+{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
+ \
+static inline void clear_btree_node_ ## flag(struct btree *b) \
+{ clear_bit(BTREE_NODE_ ## flag, &b->flags); }
+
+BTREE_FLAGS()
+#undef x
+
+static inline struct btree_write *btree_current_write(struct btree *b)
+{
+ return b->writes + btree_node_write_idx(b);
+}
+
+static inline struct btree_write *btree_prev_write(struct btree *b)
+{
+ return b->writes + (btree_node_write_idx(b) ^ 1);
+}
+
+static inline struct bset_tree *bset_tree_last(struct btree *b)
+{
+ EBUG_ON(!b->nsets);
+ return b->set + b->nsets - 1;
+}
+
+static inline void *
+__btree_node_offset_to_ptr(const struct btree *b, u16 offset)
+{
+ return (void *) ((u64 *) b->data + 1 + offset);
+}
+
+static inline u16
+__btree_node_ptr_to_offset(const struct btree *b, const void *p)
+{
+ u16 ret = (u64 *) p - 1 - (u64 *) b->data;
+
+ EBUG_ON(__btree_node_offset_to_ptr(b, ret) != p);
+ return ret;
+}
+
+static inline struct bset *bset(const struct btree *b,
+ const struct bset_tree *t)
+{
+ return __btree_node_offset_to_ptr(b, t->data_offset);
+}
+
+static inline void set_btree_bset_end(struct btree *b, struct bset_tree *t)
+{
+ t->end_offset =
+ __btree_node_ptr_to_offset(b, vstruct_last(bset(b, t)));
+}
+
+static inline void set_btree_bset(struct btree *b, struct bset_tree *t,
+ const struct bset *i)
+{
+ t->data_offset = __btree_node_ptr_to_offset(b, i);
+ set_btree_bset_end(b, t);
+}
+
+static inline struct bset *btree_bset_first(struct btree *b)
+{
+ return bset(b, b->set);
+}
+
+static inline struct bset *btree_bset_last(struct btree *b)
+{
+ return bset(b, bset_tree_last(b));
+}
+
+static inline u16
+__btree_node_key_to_offset(const struct btree *b, const struct bkey_packed *k)
+{
+ return __btree_node_ptr_to_offset(b, k);
+}
+
+static inline struct bkey_packed *
+__btree_node_offset_to_key(const struct btree *b, u16 k)
+{
+ return __btree_node_offset_to_ptr(b, k);
+}
+
+static inline unsigned btree_bkey_first_offset(const struct bset_tree *t)
+{
+ return t->data_offset + offsetof(struct bset, _data) / sizeof(u64);
+}
+
+#define btree_bkey_first(_b, _t) \
+({ \
+ EBUG_ON(bset(_b, _t)->start != \
+ __btree_node_offset_to_key(_b, btree_bkey_first_offset(_t)));\
+ \
+ bset(_b, _t)->start; \
+})
+
+#define btree_bkey_last(_b, _t) \
+({ \
+ EBUG_ON(__btree_node_offset_to_key(_b, (_t)->end_offset) != \
+ vstruct_last(bset(_b, _t))); \
+ \
+ __btree_node_offset_to_key(_b, (_t)->end_offset); \
+})
+
+static inline unsigned bset_u64s(struct bset_tree *t)
+{
+ return t->end_offset - t->data_offset -
+ sizeof(struct bset) / sizeof(u64);
+}
+
+static inline unsigned bset_dead_u64s(struct btree *b, struct bset_tree *t)
+{
+ return bset_u64s(t) - b->nr.bset_u64s[t - b->set];
+}
+
+static inline unsigned bset_byte_offset(struct btree *b, void *i)
+{
+ return i - (void *) b->data;
+}
+
+enum btree_node_type {
+#define x(kwd, val, ...) BKEY_TYPE_##kwd = val,
+ BCH_BTREE_IDS()
+#undef x
+ BKEY_TYPE_btree,
+};
+
+/* Type of a key in btree @id at level @level: */
+static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
+{
+ return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
+}
+
+/* Type of keys @b contains: */
+static inline enum btree_node_type btree_node_type(struct btree *b)
+{
+ return __btree_node_type(b->c.level, b->c.btree_id);
+}
+
+#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
+ (BIT(BKEY_TYPE_extents)| \
+ BIT(BKEY_TYPE_alloc)| \
+ BIT(BKEY_TYPE_inodes)| \
+ BIT(BKEY_TYPE_stripes)| \
+ BIT(BKEY_TYPE_reflink)| \
+ BIT(BKEY_TYPE_btree))
+
+#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS \
+ (BIT(BKEY_TYPE_alloc)| \
+ BIT(BKEY_TYPE_inodes)| \
+ BIT(BKEY_TYPE_stripes)| \
+ BIT(BKEY_TYPE_snapshots))
+
+#define BTREE_NODE_TYPE_HAS_TRIGGERS \
+ (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \
+ BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
+
+static inline bool btree_node_type_needs_gc(enum btree_node_type type)
+{
+ return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
+}
+
+static inline bool btree_node_type_is_extents(enum btree_node_type type)
+{
+ const unsigned mask = 0
+#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << nr)
+ BCH_BTREE_IDS()
+#undef x
+ ;
+
+ return (1U << type) & mask;
+}
+
+static inline bool btree_id_is_extents(enum btree_id btree)
+{
+ return btree_node_type_is_extents((enum btree_node_type) btree);
+}
+
+static inline bool btree_type_has_snapshots(enum btree_id id)
+{
+ const unsigned mask = 0
+#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
+ BCH_BTREE_IDS()
+#undef x
+ ;
+
+ return (1U << id) & mask;
+}
+
+static inline bool btree_type_has_ptrs(enum btree_id id)
+{
+ const unsigned mask = 0
+#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr)
+ BCH_BTREE_IDS()
+#undef x
+ ;
+
+ return (1U << id) & mask;
+}
+
+struct btree_root {
+ struct btree *b;
+
+ /* On disk root - see async splits: */
+ __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+ u8 level;
+ u8 alive;
+ s8 error;
+};
+
+enum btree_gc_coalesce_fail_reason {
+ BTREE_GC_COALESCE_FAIL_RESERVE_GET,
+ BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
+ BTREE_GC_COALESCE_FAIL_FORMAT_FITS,
+};
+
+enum btree_node_sibling {
+ btree_prev_sib,
+ btree_next_sib,
+};
+
+#endif /* _BCACHEFS_BTREE_TYPES_H */
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
new file mode 100644
index 000000000000..324767c0ddcc
--- /dev/null
+++ b/fs/bcachefs/btree_update.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_update.h"
+#include "btree_iter.h"
+#include "btree_journal_iter.h"
+#include "btree_locking.h"
+#include "buckets.h"
+#include "debug.h"
+#include "errcode.h"
+#include "error.h"
+#include "extents.h"
+#include "keylist.h"
+#include "snapshot.h"
+#include "trace.h"
+
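+/*
+ * Pending updates are sorted by btree, then cached vs. uncached, then
+ * descending level, then key position:
+ */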
+static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
+ const struct btree_insert_entry *r)
+{
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->cached, r->cached) ?:
+ -cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->k->k.p, r->k->k.p);
+}
+
+static int __must_check
+bch2_trans_update_by_path(struct btree_trans *, struct btree_path *,
+ struct bkey_i *, enum btree_update_flags,
+ unsigned long ip);
+
+static noinline int extent_front_merge(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct bkey_i **insert,
+ enum btree_update_flags flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i *update;
+ int ret;
+
+ update = bch2_bkey_make_mut_noupdate(trans, k);
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ return ret;
+
+ if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
+ return 0;
+
+ ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
+ bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ ret = bch2_btree_delete_at(trans, iter, flags);
+ if (ret)
+ return ret;
+
+ *insert = update;
+ return 0;
+}
+
+static noinline int extent_back_merge(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *insert,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ int ret;
+
+ ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
+ bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 0;
+
+ bch2_bkey_merge(c, bkey_i_to_s(insert), k);
+ return 0;
+}
+
+/*
+ * When deleting, check if we need to emit a whiteout (because we're overwriting
+ * something in an ancestor snapshot)
+ */
+static int need_whiteout_for_snapshot(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot = pos.snapshot;
+ int ret;
+
+ if (!bch2_snapshot_parent(trans->c, pos.snapshot))
+ return 0;
+
+ pos.snapshot++;
+
+ for_each_btree_key_norestart(trans, iter, btree_id, pos,
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_NOPRESERVE, k, ret) {
+ if (!bkey_eq(k.k->p, pos))
+ break;
+
+ if (bch2_snapshot_is_ancestor(trans->c, snapshot,
+ k.k->p.snapshot)) {
+ ret = !bkey_whiteout(k.k);
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
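+/*
+ * A key is being rewritten at a new position (@old_pos -> @new_pos):
+ * descendant snapshots that had their own version at @old_pos would
+ * otherwise see the rewritten key appear at @new_pos, so emit whiteouts at
+ * @new_pos for each such snapshot:
+ */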
+int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
+ enum btree_id id,
+ struct bpos old_pos,
+ struct bpos new_pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter old_iter, new_iter = { NULL };
+ struct bkey_s_c old_k, new_k;
+ snapshot_id_list s;
+ struct bkey_i *update;
+ int ret = 0;
+
+ if (!bch2_snapshot_has_children(c, old_pos.snapshot))
+ return 0;
+
+ darray_init(&s);
+
+ bch2_trans_iter_init(trans, &old_iter, id, old_pos,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
+ !(ret = bkey_err(old_k)) &&
+ bkey_eq(old_pos, old_k.k->p)) {
+ struct bpos whiteout_pos =
+			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);
+
+ if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
+ snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
+ continue;
+
+ new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ ret = bkey_err(new_k);
+ if (ret)
+ break;
+
+ if (new_k.k->type == KEY_TYPE_deleted) {
+ update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+ ret = PTR_ERR_OR_ZERO(update);
+ if (ret)
+ break;
+
+ bkey_init(&update->k);
+ update->k.p = whiteout_pos;
+ update->k.type = KEY_TYPE_whiteout;
+
+ ret = bch2_trans_update(trans, &new_iter, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ }
+ bch2_trans_iter_exit(trans, &new_iter);
+
+ ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &new_iter);
+ bch2_trans_iter_exit(trans, &old_iter);
+ darray_exit(&s);
+
+ return ret;
+}
+
+int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
+ struct btree_iter *iter,
+ enum btree_update_flags flags,
+ struct bkey_s_c old,
+ struct bkey_s_c new)
+{
+ enum btree_id btree_id = iter->btree_id;
+ struct bkey_i *update;
+ struct bpos new_start = bkey_start_pos(new.k);
+ bool front_split = bkey_lt(bkey_start_pos(old.k), new_start);
+ bool back_split = bkey_gt(old.k->p, new.k->p);
+ int ret = 0, compressed_sectors;
+
+ /*
+ * If we're going to be splitting a compressed extent, note it
+ * so that __bch2_trans_commit() can increase our disk
+ * reservation:
+ */
+ if (((front_split && back_split) ||
+ ((front_split || back_split) && old.k->p.snapshot != new.k->p.snapshot)) &&
+ (compressed_sectors = bch2_bkey_sectors_compressed(old)))
+ trans->extra_journal_res += compressed_sectors;
+
+ if (front_split) {
+ update = bch2_bkey_make_mut_noupdate(trans, old);
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ return ret;
+
+ bch2_cut_back(new_start, update);
+
+ ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
+ old.k->p, update->k.p) ?:
+ bch2_btree_insert_nonextent(trans, btree_id, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+ if (ret)
+ return ret;
+ }
+
+ /* If we're overwriting in a different snapshot - middle split: */
+ if (old.k->p.snapshot != new.k->p.snapshot &&
+ (front_split || back_split)) {
+ update = bch2_bkey_make_mut_noupdate(trans, old);
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ return ret;
+
+ bch2_cut_front(new_start, update);
+ bch2_cut_back(new.k->p, update);
+
+ ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
+ old.k->p, update->k.p) ?:
+ bch2_btree_insert_nonextent(trans, btree_id, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+ if (ret)
+ return ret;
+ }
+
+ if (bkey_le(old.k->p, new.k->p)) {
+ update = bch2_trans_kmalloc(trans, sizeof(*update));
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ return ret;
+
+ bkey_init(&update->k);
+ update->k.p = old.k->p;
+ update->k.p.snapshot = new.k->p.snapshot;
+
+ if (new.k->p.snapshot != old.k->p.snapshot) {
+ update->k.type = KEY_TYPE_whiteout;
+ } else if (btree_type_has_snapshots(btree_id)) {
+ ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ update->k.type = KEY_TYPE_whiteout;
+ }
+
+ ret = bch2_btree_insert_nonextent(trans, btree_id, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+ if (ret)
+ return ret;
+ }
+
+ if (back_split) {
+ update = bch2_bkey_make_mut_noupdate(trans, old);
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ return ret;
+
+ bch2_cut_front(new.k->p, update);
+
+ ret = bch2_trans_update_by_path(trans, iter->path, update,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ flags, _RET_IP_);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
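+/*
+ * Extent updates are implemented in terms of non-extent updates: merge
+ * @insert with the existing extents it touches where possible, split and
+ * whiteout whatever it overlaps, then insert the new key:
+ */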
+static int bch2_trans_update_extent(struct btree_trans *trans,
+ struct btree_iter *orig_iter,
+ struct bkey_i *insert,
+ enum btree_update_flags flags)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ enum btree_id btree_id = orig_iter->btree_id;
+ int ret = 0;
+
+ bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
+ BTREE_ITER_INTENT|
+ BTREE_ITER_WITH_UPDATES|
+ BTREE_ITER_NOT_EXTENTS);
+ k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
+ if ((ret = bkey_err(k)))
+ goto err;
+ if (!k.k)
+ goto out;
+
+ if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
+ if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
+ ret = extent_front_merge(trans, &iter, k, &insert, flags);
+ if (ret)
+ goto err;
+ }
+
+ goto next;
+ }
+
+ while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
+ bool done = bkey_lt(insert->k.p, k.k->p);
+
+ ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
+ if (ret)
+ goto err;
+
+ if (done)
+ goto out;
+next:
+ bch2_btree_iter_advance(&iter);
+ k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
+ if ((ret = bkey_err(k)))
+ goto err;
+ if (!k.k)
+ goto out;
+ }
+
+ if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
+ ret = extent_back_merge(trans, &iter, insert, k);
+ if (ret)
+ goto err;
+ }
+out:
+ if (!bkey_deleted(&insert->k))
+ ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+static noinline int flush_new_cached_update(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_insert_entry *i,
+ enum btree_update_flags flags,
+ unsigned long ip)
+{
+ struct btree_path *btree_path;
+ struct bkey k;
+ int ret;
+
+ btree_path = bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+ BTREE_ITER_INTENT, _THIS_IP_);
+ ret = bch2_btree_path_traverse(trans, btree_path, 0);
+ if (ret)
+ goto out;
+
+ /*
+ * The old key in the insert entry might actually refer to an existing
+ * key in the btree that has been deleted from cache and not yet
+ * flushed. Check for this and skip the flush so we don't run triggers
+ * against a stale key.
+ */
+ bch2_btree_path_peek_slot_exact(btree_path, &k);
+ if (!bkey_deleted(&k))
+ goto out;
+
+ i->key_cache_already_flushed = true;
+ i->flags |= BTREE_TRIGGER_NORUN;
+
+ btree_path_set_should_be_locked(btree_path);
+ ret = bch2_trans_update_by_path(trans, btree_path, i->k, flags, ip);
+out:
+ bch2_path_put(trans, btree_path, true);
+ return ret;
+}
+
+static int __must_check
+bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
+ struct bkey_i *k, enum btree_update_flags flags,
+ unsigned long ip)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_insert_entry *i, n;
+ u64 seq = 0;
+ int cmp;
+
+ EBUG_ON(!path->should_be_locked);
+ EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
+ EBUG_ON(!bpos_eq(k->k.p, path->pos));
+
+ /*
+ * The transaction journal res hasn't been allocated at this point.
+ * That occurs at commit time. Reuse the seq field to pass in the seq
+ * of a prejournaled key.
+ */
+ if (flags & BTREE_UPDATE_PREJOURNAL)
+ seq = trans->journal_res.seq;
+
+ n = (struct btree_insert_entry) {
+ .flags = flags,
+ .bkey_type = __btree_node_type(path->level, path->btree_id),
+ .btree_id = path->btree_id,
+ .level = path->level,
+ .cached = path->cached,
+ .path = path,
+ .k = k,
+ .seq = seq,
+ .ip_allocated = ip,
+ };
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans_for_each_update(trans, i)
+ BUG_ON(i != trans->updates &&
+ btree_insert_entry_cmp(i - 1, i) >= 0);
+#endif
+
+ /*
+ * Pending updates are kept sorted: first, find position of new update,
+ * then delete/trim any updates the new update overwrites:
+ */
+ trans_for_each_update(trans, i) {
+ cmp = btree_insert_entry_cmp(&n, i);
+ if (cmp <= 0)
+ break;
+ }
+
+	if (i < trans->updates + trans->nr_updates && !cmp) {
+ EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
+
+ bch2_path_put(trans, i->path, true);
+ i->flags = n.flags;
+ i->cached = n.cached;
+ i->k = n.k;
+ i->path = n.path;
+ i->seq = n.seq;
+ i->ip_allocated = n.ip_allocated;
+ } else {
+ array_insert_item(trans->updates, trans->nr_updates,
+ i - trans->updates, n);
+
+ i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
+ i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;
+
+ if (unlikely(trans->journal_replay_not_finished)) {
+ struct bkey_i *j_k =
+ bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);
+
+ if (j_k) {
+ i->old_k = j_k->k;
+ i->old_v = &j_k->v;
+ }
+ }
+ }
+
+ __btree_path_get(i->path, true);
+
+ /*
+ * If a key is present in the key cache, it must also exist in the
+ * btree - this is necessary for cache coherency. When iterating over
+ * a btree that's cached in the key cache, the btree iter code checks
+ * the key cache - but the key has to exist in the btree for that to
+ * work:
+ */
+ if (path->cached && bkey_deleted(&i->old_k))
+ return flush_new_cached_update(trans, path, i, flags, ip);
+
+ return 0;
+}
+
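+/*
+ * Make sure @iter has a traversed, locked key cache path positioned at
+ * @path->pos, restarting the transaction if we raced with another thread
+ * dirtying the cached key:
+ */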
+static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree_path *path)
+{
+ if (!iter->key_cache_path ||
+ !iter->key_cache_path->should_be_locked ||
+ !bpos_eq(iter->key_cache_path->pos, iter->pos)) {
+ struct bkey_cached *ck;
+ int ret;
+
+ if (!iter->key_cache_path)
+ iter->key_cache_path =
+ bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_CACHED, _THIS_IP_);
+
+ iter->key_cache_path =
+ bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
+ iter->flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
+
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
+ BTREE_ITER_CACHED);
+ if (unlikely(ret))
+ return ret;
+
+ ck = (void *) iter->key_cache_path->l[0].b;
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
+ }
+
+ btree_path_set_should_be_locked(iter->key_cache_path);
+ }
+
+ return 0;
+}
+
+int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_i *k, enum btree_update_flags flags)
+{
+ struct btree_path *path = iter->update_path ?: iter->path;
+ int ret;
+
+ if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ return bch2_trans_update_extent(trans, iter, k, flags);
+
+ if (bkey_deleted(&k->k) &&
+ !(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
+ (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
+ ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (ret)
+ k->k.type = KEY_TYPE_whiteout;
+ }
+
+ /*
+ * Ensure that updates to cached btrees go to the key cache:
+ */
+ if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
+ !path->cached &&
+ !path->level &&
+ btree_id_cached(trans->c, path->btree_id)) {
+ ret = bch2_trans_update_get_key_cache(trans, iter, path);
+ if (ret)
+ return ret;
+
+ path = iter->key_cache_path;
+ }
+
+ return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
+}
+
+/*
+ * Add a transaction update for a key that has already been journaled.
+ */
+int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
+ struct btree_iter *iter, struct bkey_i *k,
+ enum btree_update_flags flags)
+{
+ trans->journal_res.seq = seq;
+ return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
+ BTREE_UPDATE_PREJOURNAL);
+}
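+/*
+ * Illustrative sketch (not part of this patch): a caller that already
+ * knows the journal sequence number a key was written at - e.g. during
+ * journal replay - could record the update without journaling it again:
+ *
+ *	ret = bch2_trans_update_seq(trans, journal_seq, &iter, k, 0);
+ */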
+
+int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
+ enum btree_id btree,
+ struct bkey_i *k)
+{
+ struct btree_write_buffered_key *i;
+ int ret;
+
+ EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
+ EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
+
+ trans_for_each_wb_update(trans, i) {
+ if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
+ bkey_copy(&i->k, k);
+ return 0;
+ }
+ }
+
+ if (!trans->wb_updates ||
+ trans->nr_wb_updates == trans->wb_updates_size) {
+ struct btree_write_buffered_key *u;
+
+ if (trans->nr_wb_updates == trans->wb_updates_size) {
+ struct btree_transaction_stats *s = btree_trans_stats(trans);
+
+ BUG_ON(trans->wb_updates_size > U8_MAX / 2);
+ trans->wb_updates_size = max(1, trans->wb_updates_size * 2);
+ if (s)
+ s->wb_updates_size = trans->wb_updates_size;
+ }
+
+ u = bch2_trans_kmalloc_nomemzero(trans,
+ trans->wb_updates_size *
+ sizeof(struct btree_write_buffered_key));
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ return ret;
+
+ if (trans->nr_wb_updates)
+ memcpy(u, trans->wb_updates, trans->nr_wb_updates *
+ sizeof(struct btree_write_buffered_key));
+ trans->wb_updates = u;
+ }
+
+ trans->wb_updates[trans->nr_wb_updates] = (struct btree_write_buffered_key) {
+ .btree = btree,
+ };
+
+ bkey_copy(&trans->wb_updates[trans->nr_wb_updates].k, k);
+ trans->nr_wb_updates++;
+
+ return 0;
+}
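+/*
+ * Usage sketch (names illustrative): buffered updates to the same btree
+ * and position coalesce, so only the last copy survives in the buffer:
+ *
+ *	ret = bch2_trans_update_buffered(trans, btree, k1) ?:
+ *	      bch2_trans_update_buffered(trans, btree, k2);
+ *
+ * If k1->k.p == k2->k.p, only k2 is written out.
+ */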
+
+int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
+ enum btree_id btree, struct bpos end)
+{
+ struct bkey_s_c k;
+ int ret = 0;
+
+ bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
+ k = bch2_btree_iter_prev(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ bch2_btree_iter_advance(iter);
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ BUG_ON(k.k->type != KEY_TYPE_deleted);
+
+ if (bkey_gt(k.k->p, end)) {
+ ret = -BCH_ERR_ENOSPC_btree_slot;
+ goto err;
+ }
+
+ return 0;
+err:
+ bch2_trans_iter_exit(trans, iter);
+ return ret;
+}
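+/*
+ * Hedged example (btree id illustrative): find the first empty slot after
+ * the last key in a btree, failing with ENOSPC_btree_slot past @end:
+ *
+ *	ret = bch2_bkey_get_empty_slot(trans, &iter,
+ *				       BTREE_ID_subvolumes, POS(0, U32_MAX));
+ */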
+
+void bch2_trans_commit_hook(struct btree_trans *trans,
+ struct btree_trans_commit_hook *h)
+{
+ h->next = trans->hooks;
+ trans->hooks = h;
+}
+
+int bch2_btree_insert_nonextent(struct btree_trans *trans,
+ enum btree_id btree, struct bkey_i *k,
+ enum btree_update_flags flags)
+{
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, btree, k->k.p,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, k, flags);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
+ struct bkey_i *k, enum btree_update_flags flags)
+{
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, k, flags);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/**
+ * bch2_btree_insert - insert a key into the btree specified by @id
+ * @c: pointer to struct bch_fs
+ * @id: btree to insert into
+ * @k: key to insert
+ * @disk_res: must be non-NULL whenever inserting or potentially
+ * splitting data extents
+ * @flags: transaction commit flags
+ *
+ * Returns: 0 on success, error code on failure
+ */
+int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
+ struct disk_reservation *disk_res, int flags)
+{
+ return bch2_trans_do(c, disk_res, NULL, flags,
+ bch2_btree_insert_trans(trans, id, k, 0));
+}
+
+int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
+ unsigned len, unsigned update_flags)
+{
+ struct bkey_i *k;
+
+ k = bch2_trans_kmalloc(trans, sizeof(*k));
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+
+ bkey_init(&k->k);
+ k->k.p = iter->pos;
+ bch2_key_resize(&k->k, len);
+ return bch2_trans_update(trans, iter, k, update_flags);
+}
+
+int bch2_btree_delete_at(struct btree_trans *trans,
+ struct btree_iter *iter, unsigned update_flags)
+{
+ return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
+}
+
+int bch2_btree_delete_at_buffered(struct btree_trans *trans,
+ enum btree_id btree, struct bpos pos)
+{
+ struct bkey_i *k;
+
+ k = bch2_trans_kmalloc(trans, sizeof(*k));
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+
+ bkey_init(&k->k);
+ k->k.p = pos;
+ return bch2_trans_update_buffered(trans, btree, k);
+}
+
+int bch2_btree_delete(struct btree_trans *trans,
+ enum btree_id btree, struct bpos pos,
+ unsigned update_flags)
+{
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, btree, pos,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_delete_at(trans, &iter, update_flags);
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
+ struct bpos start, struct bpos end,
+ unsigned update_flags,
+ u64 *journal_seq)
+{
+ u32 restart_count = trans->restart_count;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
+ while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(trans->c, 0);
+ struct bkey_i delete;
+
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ bkey_init(&delete.k);
+
+ /*
+ * This could probably be more efficient for extents:
+ */
+
+ /*
+ * For extents, iter.pos won't necessarily be the same as
+ * bkey_start_pos(k.k) (for non extents they always will be the
+ * same). It's important that we delete starting from iter.pos
+ * because the range we want to delete could start in the middle
+ * of k.
+ *
+ * (bch2_btree_iter_peek() does guarantee that iter.pos >=
+ * bkey_start_pos(k.k)).
+ */
+ delete.k.p = iter.pos;
+
+ if (iter.flags & BTREE_ITER_IS_EXTENTS)
+ bch2_key_resize(&delete.k,
+ bpos_min(end, k.k->p).offset -
+ iter.pos.offset);
+
+ ret = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
+ bch2_trans_commit(trans, &disk_res, journal_seq,
+ BTREE_INSERT_NOFAIL);
+ bch2_disk_reservation_put(trans->c, &disk_res);
+err:
+ /*
+ * the bch2_trans_begin() call is in a weird place because we
+ * need to call it after every transaction commit, to avoid path
+ * overflow, but don't want to call it if the delete operation
+ * is a no-op and we have no work to do:
+ */
+ bch2_trans_begin(trans);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret ?: trans_was_restarted(trans, restart_count);
+}
+
+/*
+ * bch2_btree_delete_range - delete everything within a given range
+ *
+ * Range is a half open interval - [start, end)
+ */
+int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
+ struct bpos start, struct bpos end,
+ unsigned update_flags,
+ u64 *journal_seq)
+{
+ int ret = bch2_trans_run(c,
+ bch2_btree_delete_range_trans(trans, id, start, end,
+ update_flags, journal_seq));
+ if (ret == -BCH_ERR_transaction_restart_nested)
+ ret = 0;
+ return ret;
+}
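+/*
+ * Example (hedged; btree id illustrative): remove every key in a btree.
+ * The interval is half open, so the key at the end position itself is
+ * not deleted:
+ *
+ *	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
+ *				      POS_MIN, POS_MAX, 0, NULL);
+ */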
+
+int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
+ struct bpos pos, bool set)
+{
+ struct bkey_i *k;
+ int ret = 0;
+
+ k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
+ ret = PTR_ERR_OR_ZERO(k);
+ if (unlikely(ret))
+ return ret;
+
+ bkey_init(&k->k);
+ k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
+ k->k.p = pos;
+
+ return bch2_trans_update_buffered(trans, btree, k);
+}
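+/*
+ * Sketch of intended use (assumed caller, btree id illustrative): treat a
+ * btree as a bitset, setting or clearing the key at @pos via the write
+ * buffer:
+ *
+ *	ret = bch2_btree_bit_mod(trans, BTREE_ID_need_discard, pos, true);
+ */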
+
+__printf(2, 0)
+static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list args)
+{
+ struct printbuf buf = PRINTBUF;
+ struct jset_entry_log *l;
+ unsigned u64s;
+ int ret;
+
+ prt_vprintf(&buf, fmt, args);
+ ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
+ if (ret)
+ goto err;
+
+ u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
+
+ ret = darray_make_room(entries, jset_u64s(u64s));
+ if (ret)
+ goto err;
+
+ l = (void *) &darray_top(*entries);
+ l->entry.u64s = cpu_to_le16(u64s);
+ l->entry.btree_id = 0;
+ l->entry.level = 1;
+ l->entry.type = BCH_JSET_ENTRY_log;
+ l->entry.pad[0] = 0;
+ l->entry.pad[1] = 0;
+ l->entry.pad[2] = 0;
+ memcpy(l->d, buf.buf, buf.pos);
+ while (buf.pos & 7)
+ l->d[buf.pos++] = '\0';
+
+ entries->nr += jset_u64s(u64s);
+err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+__printf(3, 0)
+static int
+__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
+ va_list args)
+{
+ int ret;
+
+ if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
+ ret = __bch2_trans_log_msg(&c->journal.early_journal_entries, fmt, args);
+ } else {
+ ret = bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|commit_flags,
+ __bch2_trans_log_msg(&trans->extra_journal_entries, fmt, args));
+ }
+
+ return ret;
+}
+
+__printf(2, 3)
+int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = __bch2_fs_log_msg(c, 0, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+/*
+ * Use for logging messages during recovery: commits with the reclaim
+ * watermark, so that reserved journal space may be used and we avoid
+ * blocking.
+ */
+__printf(2, 3)
+int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
+ va_end(args);
+ return ret;
+}
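+
+/*
+ * Illustrative example: both helpers take printf-style arguments, e.g.
+ *
+ *	bch2_journal_log_msg(c, "error marking superblock %s",
+ *			     bch2_err_str(ret));
+ */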
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
new file mode 100644
index 000000000000..9816d2286540
--- /dev/null
+++ b/fs/bcachefs/btree_update.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_UPDATE_H
+#define _BCACHEFS_BTREE_UPDATE_H
+
+#include "btree_iter.h"
+#include "journal.h"
+
+struct bch_fs;
+struct btree;
+
+void bch2_btree_node_prep_for_write(struct btree_trans *,
+ struct btree_path *, struct btree *);
+bool bch2_btree_bset_insert_key(struct btree_trans *, struct btree_path *,
+ struct btree *, struct btree_node_iter *,
+ struct bkey_i *);
+
+int bch2_btree_node_flush0(struct journal *, struct journal_entry_pin *, u64);
+int bch2_btree_node_flush1(struct journal *, struct journal_entry_pin *, u64);
+void bch2_btree_add_journal_pin(struct bch_fs *, struct btree *, u64);
+
+void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *,
+ struct bkey_i *, u64);
+
+enum btree_insert_flags {
+ /* First bits for bch_watermark: */
+ __BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
+ __BTREE_INSERT_NOCHECK_RW,
+ __BTREE_INSERT_LAZY_RW,
+ __BTREE_INSERT_JOURNAL_REPLAY,
+ __BTREE_INSERT_JOURNAL_RECLAIM,
+ __BTREE_INSERT_NOWAIT,
+ __BTREE_INSERT_GC_LOCK_HELD,
+ __BCH_HASH_SET_MUST_CREATE,
+ __BCH_HASH_SET_MUST_REPLACE,
+};
+
+/* Don't check for -ENOSPC: */
+#define BTREE_INSERT_NOFAIL BIT(__BTREE_INSERT_NOFAIL)
+
+#define BTREE_INSERT_NOCHECK_RW BIT(__BTREE_INSERT_NOCHECK_RW)
+#define BTREE_INSERT_LAZY_RW BIT(__BTREE_INSERT_LAZY_RW)
+
+/* Insert is for journal replay - don't get journal reservations: */
+#define BTREE_INSERT_JOURNAL_REPLAY BIT(__BTREE_INSERT_JOURNAL_REPLAY)
+
+/* Insert is being called from journal reclaim path: */
+#define BTREE_INSERT_JOURNAL_RECLAIM BIT(__BTREE_INSERT_JOURNAL_RECLAIM)
+
+/* Don't block on allocation failure (for new btree nodes): */
+#define BTREE_INSERT_NOWAIT BIT(__BTREE_INSERT_NOWAIT)
+#define BTREE_INSERT_GC_LOCK_HELD BIT(__BTREE_INSERT_GC_LOCK_HELD)
+
+#define BCH_HASH_SET_MUST_CREATE BIT(__BCH_HASH_SET_MUST_CREATE)
+#define BCH_HASH_SET_MUST_REPLACE BIT(__BCH_HASH_SET_MUST_REPLACE)
+
+int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
+ unsigned, unsigned);
+int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
+int bch2_btree_delete_at_buffered(struct btree_trans *, enum btree_id, struct bpos);
+int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);
+
+int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
+ struct bkey_i *, enum btree_update_flags);
+
+int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
+ enum btree_update_flags);
+int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
+ struct disk_reservation *, int flags);
+
+int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
+ struct bpos, struct bpos, unsigned, u64 *);
+int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
+ struct bpos, struct bpos, unsigned, u64 *);
+
+int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);
+
+int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
+ struct bpos, struct bpos);
+
+/*
+ * For use when splitting extents in existing snapshots:
+ *
+ * If @old_pos is an interior snapshot node, iterate over descendant snapshot
+ * nodes: for every descendant snapshot in which @old_pos is overwritten and
+ * not visible, emit a whiteout at @new_pos.
+ */
+static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
+ enum btree_id btree,
+ struct bpos old_pos,
+ struct bpos new_pos)
+{
+ if (!btree_type_has_snapshots(btree) ||
+ bkey_eq(old_pos, new_pos))
+ return 0;
+
+ return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);
+}
+
+int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
+ enum btree_update_flags,
+ struct bkey_s_c, struct bkey_s_c);
+
+int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
+ enum btree_id, struct bpos);
+
+int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, enum btree_update_flags);
+int __must_check bch2_trans_update_seq(struct btree_trans *, u64, struct btree_iter *,
+ struct bkey_i *, enum btree_update_flags);
+int __must_check bch2_trans_update_buffered(struct btree_trans *,
+ enum btree_id, struct bkey_i *);
+
+void bch2_trans_commit_hook(struct btree_trans *,
+ struct btree_trans_commit_hook *);
+int __bch2_trans_commit(struct btree_trans *, unsigned);
+
+__printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
+__printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...);
+
+/**
+ * bch2_trans_commit - insert keys at given iterator positions
+ *
+ * This is the main entry point for btree updates.
+ *
+ * Return values:
+ * -EROFS: filesystem read only
+ * -EIO: journal or btree node IO error
+ */
+static inline int bch2_trans_commit(struct btree_trans *trans,
+ struct disk_reservation *disk_res,
+ u64 *journal_seq,
+ unsigned flags)
+{
+ trans->disk_res = disk_res;
+ trans->journal_seq = journal_seq;
+
+ return __bch2_trans_commit(trans, flags);
+}
+
+#define commit_do(_trans, _disk_res, _journal_seq, _flags, _do) \
+ lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
+ (_journal_seq), (_flags)))
+
+#define nested_commit_do(_trans, _disk_res, _journal_seq, _flags, _do) \
+ nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\
+ (_journal_seq), (_flags)))
+
+#define bch2_trans_run(_c, _do) \
+({ \
+ struct btree_trans *trans = bch2_trans_get(_c); \
+ int _ret = (_do); \
+ bch2_trans_put(trans); \
+ _ret; \
+})
+
+#define bch2_trans_do(_c, _disk_res, _journal_seq, _flags, _do) \
+ bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do))
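+
+/*
+ * Usage sketch, mirroring bch2_btree_insert() in btree_update.c: allocate
+ * a transaction, run @_do followed by a commit, retrying on transaction
+ * restart, then put the transaction:
+ *
+ *	return bch2_trans_do(c, disk_res, NULL, flags,
+ *			     bch2_btree_insert_trans(trans, id, k, 0));
+ */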
+
+#define trans_for_each_update(_trans, _i) \
+ for ((_i) = (_trans)->updates; \
+ (_i) < (_trans)->updates + (_trans)->nr_updates; \
+ (_i)++)
+
+#define trans_for_each_wb_update(_trans, _i) \
+ for ((_i) = (_trans)->wb_updates; \
+ (_i) < (_trans)->wb_updates + (_trans)->nr_wb_updates; \
+ (_i)++)
+
+static inline void bch2_trans_reset_updates(struct btree_trans *trans)
+{
+ struct btree_insert_entry *i;
+
+ trans_for_each_update(trans, i)
+ bch2_path_put(trans, i->path, true);
+
+ trans->extra_journal_res = 0;
+ trans->nr_updates = 0;
+ trans->nr_wb_updates = 0;
+ trans->wb_updates = NULL;
+ trans->hooks = NULL;
+ trans->extra_journal_entries.nr = 0;
+
+ if (trans->fs_usage_deltas) {
+ trans->fs_usage_deltas->used = 0;
+ memset((void *) trans->fs_usage_deltas +
+ offsetof(struct replicas_delta_list, memset_start), 0,
+ (void *) &trans->fs_usage_deltas->memset_end -
+ (void *) &trans->fs_usage_deltas->memset_start);
+ }
+}
+
+static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k,
+ unsigned type, unsigned min_bytes)
+{
+ unsigned bytes = max_t(unsigned, min_bytes, bkey_bytes(k.k));
+ struct bkey_i *mut;
+
+ if (type && k.k->type != type)
+ return ERR_PTR(-ENOENT);
+
+ mut = bch2_trans_kmalloc_nomemzero(trans, bytes);
+ if (!IS_ERR(mut)) {
+ bkey_reassemble(mut, k);
+
+ if (unlikely(bytes > bkey_bytes(k.k))) {
+ memset((void *) mut + bkey_bytes(k.k), 0,
+ bytes - bkey_bytes(k.k));
+ mut->k.u64s = DIV_ROUND_UP(bytes, sizeof(u64));
+ }
+ }
+ return mut;
+}
+
+static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *trans, struct bkey_s_c k)
+{
+ return __bch2_bkey_make_mut_noupdate(trans, k, 0, 0);
+}
+
+#define bch2_bkey_make_mut_noupdate_typed(_trans, _k, _type) \
+ bkey_i_to_##_type(__bch2_bkey_make_mut_noupdate(_trans, _k, \
+ KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
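+
+/*
+ * Hedged example (key type illustrative): get a mutable, correctly-typed
+ * copy of @k without queueing an update; errors with -ENOENT on type
+ * mismatch:
+ *
+ *	struct bkey_i_inode_v3 *inode =
+ *		bch2_bkey_make_mut_noupdate_typed(trans, k, inode_v3);
+ *	ret = PTR_ERR_OR_ZERO(inode);
+ */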
+
+static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c *k, unsigned flags,
+ unsigned type, unsigned min_bytes)
+{
+ struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
+ int ret;
+
+ if (IS_ERR(mut))
+ return mut;
+
+ ret = bch2_trans_update(trans, iter, mut, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ *k = bkey_i_to_s_c(mut);
+ return mut;
+}
+
+static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c *k, unsigned flags)
+{
+ return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
+}
+
+#define bch2_bkey_make_mut_typed(_trans, _iter, _k, _flags, _type) \
+ bkey_i_to_##_type(__bch2_bkey_make_mut(_trans, _iter, _k, _flags,\
+ KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
+
+static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags, unsigned type, unsigned min_bytes)
+{
+ struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
+ btree_id, pos, flags|BTREE_ITER_INTENT, type);
+ struct bkey_i *ret = IS_ERR(k.k)
+ ? ERR_CAST(k.k)
+ : __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
+ if (IS_ERR(ret))
+ bch2_trans_iter_exit(trans, iter);
+ return ret;
+}
+
+static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags)
+{
+ return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0);
+}
+
+static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags, unsigned type, unsigned min_bytes)
+{
+ struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
+ btree_id, pos, flags|BTREE_ITER_INTENT, type, min_bytes);
+ int ret;
+
+ if (IS_ERR(mut))
+ return mut;
+
+ ret = bch2_trans_update(trans, iter, mut, flags);
+ if (ret) {
+ bch2_trans_iter_exit(trans, iter);
+ return ERR_PTR(ret);
+ }
+
+ return mut;
+}
+
+static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags, unsigned min_bytes)
+{
+ return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes);
+}
+
+static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
+ struct btree_iter *iter,
+ unsigned btree_id, struct bpos pos,
+ unsigned flags)
+{
+ return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0);
+}
+
+#define bch2_bkey_get_mut_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
+ bkey_i_to_##_type(__bch2_bkey_get_mut(_trans, _iter, \
+ _btree_id, _pos, _flags, \
+ KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
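+
+/*
+ * Usage sketch (btree and key type illustrative): look up @pos, take a
+ * mutable typed copy and queue it as an update in one call; on error the
+ * iterator is exited for the caller:
+ *
+ *	struct bkey_i_alloc_v4 *a =
+ *		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_alloc,
+ *					pos, 0, alloc_v4);
+ *	ret = PTR_ERR_OR_ZERO(a);
+ */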
+
+static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter,
+ unsigned flags, unsigned type, unsigned val_size)
+{
+ struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size);
+ int ret;
+
+ if (IS_ERR(k))
+ return k;
+
+ bkey_init(&k->k);
+ k->k.p = iter->pos;
+ k->k.type = type;
+ set_bkey_val_bytes(&k->k, val_size);
+
+ ret = bch2_trans_update(trans, iter, k, flags);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+ return k;
+}
+
+#define bch2_bkey_alloc(_trans, _iter, _flags, _type) \
+ bkey_i_to_##_type(__bch2_bkey_alloc(_trans, _iter, _flags, \
+ KEY_TYPE_##_type, sizeof(struct bch_##_type)))
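+
+/*
+ * Hedged example (key type illustrative): allocate a zeroed key of the
+ * given type at the iterator's position and queue it as an update:
+ *
+ *	struct bkey_i_subvolume *new_subvol =
+ *		bch2_bkey_alloc(trans, &iter, 0, subvolume);
+ *	ret = PTR_ERR_OR_ZERO(new_subvol);
+ */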
+
+#endif /* _BCACHEFS_BTREE_UPDATE_H */
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
new file mode 100644
index 000000000000..7dbf6b6c7f34
--- /dev/null
+++ b/fs/bcachefs/btree_update_interior.c
@@ -0,0 +1,2480 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_methods.h"
+#include "btree_cache.h"
+#include "btree_gc.h"
+#include "btree_journal_iter.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "buckets.h"
+#include "clock.h"
+#include "error.h"
+#include "extents.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+#include "keylist.h"
+#include "replicas.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/random.h>
+
+static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
+ struct btree_path *, struct btree *,
+ struct keylist *, unsigned);
+static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
+
+static struct btree_path *get_unlocked_mut_path(struct btree_trans *trans,
+ enum btree_id btree_id,
+ unsigned level,
+ struct bpos pos)
+{
+ struct btree_path *path;
+
+ path = bch2_path_get(trans, btree_id, pos, level + 1, level,
+ BTREE_ITER_NOPRESERVE|
+ BTREE_ITER_INTENT, _RET_IP_);
+ path = bch2_btree_path_make_mut(trans, path, true, _RET_IP_);
+ bch2_btree_path_downgrade(trans, path);
+ __bch2_btree_path_unlock(trans, path);
+ return path;
+}
+
+/* Debug code: */
+
+/*
+ * Verify that child nodes correctly span parent node's range:
+ */
+static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct bpos next_node = b->data->min_key;
+ struct btree_node_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_btree_ptr_v2 bp;
+ struct bkey unpacked;
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+
+ BUG_ON(!b->c.level);
+
+ if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ return;
+
+ bch2_btree_node_iter_init_from_start(&iter, b);
+
+ while (1) {
+ k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
+ if (k.k->type != KEY_TYPE_btree_ptr_v2)
+ break;
+ bp = bkey_s_c_to_btree_ptr_v2(k);
+
+ if (!bpos_eq(next_node, bp.v->min_key)) {
+ bch2_dump_btree_node(c, b);
+ bch2_bpos_to_text(&buf1, next_node);
+ bch2_bpos_to_text(&buf2, bp.v->min_key);
+ panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
+ }
+
+ bch2_btree_node_iter_advance(&iter, b);
+
+ if (bch2_btree_node_iter_end(&iter)) {
+ if (!bpos_eq(k.k->p, b->key.k.p)) {
+ bch2_dump_btree_node(c, b);
+ bch2_bpos_to_text(&buf1, b->key.k.p);
+ bch2_bpos_to_text(&buf2, k.k->p);
+ panic("expected end %s got %s\n", buf1.buf, buf2.buf);
+ }
+ break;
+ }
+
+ next_node = bpos_successor(k.k->p);
+ }
+#endif
+}
+
+/* Calculate ideal packed bkey format for new btree nodes: */
+
+void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
+{
+ struct bkey_packed *k;
+ struct bset_tree *t;
+ struct bkey uk;
+
+ for_each_bset(b, t)
+ bset_tree_for_each_key(b, t, k)
+ if (!bkey_deleted(k)) {
+ uk = bkey_unpack_key(b, k);
+ bch2_bkey_format_add_key(s, &uk);
+ }
+}
+
+static struct bkey_format bch2_btree_calc_format(struct btree *b)
+{
+ struct bkey_format_state s;
+
+ bch2_bkey_format_init(&s);
+ bch2_bkey_format_add_pos(&s, b->data->min_key);
+ bch2_bkey_format_add_pos(&s, b->data->max_key);
+ __bch2_btree_calc_format(&s, b);
+
+ return bch2_bkey_format_done(&s);
+}
+
+static size_t btree_node_u64s_with_format(struct btree *b,
+ struct bkey_format *new_f)
+{
+ struct bkey_format *old_f = &b->format;
+
+ /* stupid integer promotion rules */
+ ssize_t delta =
+ (((int) new_f->key_u64s - old_f->key_u64s) *
+ (int) b->nr.packed_keys) +
+ (((int) new_f->key_u64s - BKEY_U64s) *
+ (int) b->nr.unpacked_keys);
+
+ BUG_ON(delta + b->nr.live_u64s < 0);
+
+ return b->nr.live_u64s + delta;
+}
+
+/**
+ * bch2_btree_node_format_fits - check if we could rewrite node with a new format
+ *
+ * @c: filesystem handle
+ * @b: btree node to rewrite
+ * @new_f: bkey format to translate keys to
+ *
+ * Returns: true if all re-packed keys will be able to fit in a new node.
+ *
+ * Assumes all keys will successfully pack with the new format.
+ */
+bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+ struct bkey_format *new_f)
+{
+ size_t u64s = btree_node_u64s_with_format(b, new_f);
+
+ return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
+}
+
+/* Btree node freeing/allocation: */
+
+static void __btree_node_free(struct bch_fs *c, struct btree *b)
+{
+ trace_and_count(c, btree_node_free, c, b);
+
+ BUG_ON(btree_node_write_blocked(b));
+ BUG_ON(btree_node_dirty(b));
+ BUG_ON(btree_node_need_write(b));
+ BUG_ON(b == btree_node_root(c, b));
+ BUG_ON(b->ob.nr);
+ BUG_ON(!list_empty(&b->write_blocked));
+ BUG_ON(b->will_make_reachable);
+
+ clear_btree_node_noevict(b);
+
+ mutex_lock(&c->btree_cache.lock);
+ list_move(&b->list, &c->btree_cache.freeable);
+ mutex_unlock(&c->btree_cache.lock);
+}
+
+static void bch2_btree_node_free_inmem(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
+{
+ struct bch_fs *c = trans->c;
+ unsigned level = b->c.level;
+
+ bch2_btree_node_lock_write_nofail(trans, path, &b->c);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+ __btree_node_free(c, b);
+ six_unlock_write(&b->c.lock);
+ mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
+
+ trans_for_each_path(trans, path)
+ if (path->l[level].b == b) {
+ btree_node_unlock(trans, path, level);
+ path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
+ }
+}
+
+static void bch2_btree_node_free_never_used(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b)
+{
+ struct bch_fs *c = as->c;
+ struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
+ struct btree_path *path;
+ unsigned level = b->c.level;
+
+ BUG_ON(!list_empty(&b->write_blocked));
+ BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));
+
+ b->will_make_reachable = 0;
+ closure_put(&as->cl);
+
+ clear_btree_node_will_make_reachable(b);
+ clear_btree_node_accessed(b);
+ clear_btree_node_dirty_acct(c, b);
+ clear_btree_node_need_write(b);
+
+ mutex_lock(&c->btree_cache.lock);
+ list_del_init(&b->list);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_unlock(&c->btree_cache.lock);
+
+ BUG_ON(p->nr >= ARRAY_SIZE(p->b));
+ p->b[p->nr++] = b;
+
+ six_unlock_intent(&b->c.lock);
+
+ trans_for_each_path(trans, path)
+ if (path->l[level].b == b) {
+ btree_node_unlock(trans, path, level);
+ path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
+ }
+}
+
+static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
+ struct disk_reservation *res,
+ struct closure *cl,
+ bool interior_node,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct write_point *wp;
+ struct btree *b;
+ BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+ struct open_buckets obs = { .nr = 0 };
+ struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+ unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
+ ? BTREE_NODE_RESERVE
+ : 0;
+ int ret;
+
+ mutex_lock(&c->btree_reserve_cache_lock);
+ if (c->btree_reserve_cache_nr > nr_reserve) {
+ struct btree_alloc *a =
+ &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+ obs = a->ob;
+ bkey_copy(&tmp.k, &a->k);
+ mutex_unlock(&c->btree_reserve_cache_lock);
+ goto mem_alloc;
+ }
+ mutex_unlock(&c->btree_reserve_cache_lock);
+
+retry:
+ ret = bch2_alloc_sectors_start_trans(trans,
+ c->opts.metadata_target ?:
+ c->opts.foreground_target,
+ 0,
+ writepoint_ptr(&c->btree_write_point),
+ &devs_have,
+ res->nr_replicas,
+ c->opts.metadata_replicas_required,
+ watermark, 0, cl, &wp);
+ if (unlikely(ret))
+ return ERR_PTR(ret);
+
+ if (wp->sectors_free < btree_sectors(c)) {
+ struct open_bucket *ob;
+ unsigned i;
+
+ open_bucket_for_each(c, &wp->ptrs, ob, i)
+ if (ob->sectors_free < btree_sectors(c))
+ ob->sectors_free = 0;
+
+ bch2_alloc_sectors_done(c, wp);
+ goto retry;
+ }
+
+ bkey_btree_ptr_v2_init(&tmp.k);
+ bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);
+
+ bch2_open_bucket_get(c, wp, &obs);
+ bch2_alloc_sectors_done(c, wp);
+mem_alloc:
+ b = bch2_btree_node_mem_alloc(trans, interior_node);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+
+ /* we hold cannibalize_lock: */
+ BUG_ON(IS_ERR(b));
+ BUG_ON(b->ob.nr);
+
+ bkey_copy(&b->key, &tmp.k);
+ b->ob = obs;
+
+ return b;
+}
+
+static struct btree *bch2_btree_node_alloc(struct btree_update *as,
+ struct btree_trans *trans,
+ unsigned level)
+{
+ struct bch_fs *c = as->c;
+ struct btree *b;
+ struct prealloc_nodes *p = &as->prealloc_nodes[!!level];
+ int ret;
+
+ BUG_ON(level >= BTREE_MAX_DEPTH);
+ BUG_ON(!p->nr);
+
+ b = p->b[--p->nr];
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+
+ set_btree_node_accessed(b);
+ set_btree_node_dirty_acct(c, b);
+ set_btree_node_need_write(b);
+
+ bch2_bset_init_first(b, &b->data->keys);
+ b->c.level = level;
+ b->c.btree_id = as->btree_id;
+ b->version_ondisk = c->sb.version;
+
+ memset(&b->nr, 0, sizeof(b->nr));
+ b->data->magic = cpu_to_le64(bset_magic(c));
+ memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
+ b->data->flags = 0;
+ SET_BTREE_NODE_ID(b->data, as->btree_id);
+ SET_BTREE_NODE_LEVEL(b->data, level);
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);
+
+ bp->v.mem_ptr = 0;
+ bp->v.seq = b->data->keys.seq;
+ bp->v.sectors_written = 0;
+ }
+
+ SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);
+
+ bch2_btree_build_aux_trees(b);
+
+ ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
+ BUG_ON(ret);
+
+ trace_and_count(c, btree_node_alloc, c, b);
+ bch2_increment_clock(c, btree_sectors(c), WRITE);
+ return b;
+}
+
+static void btree_set_min(struct btree *b, struct bpos pos)
+{
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
+ b->data->min_key = pos;
+}
+
+static void btree_set_max(struct btree *b, struct bpos pos)
+{
+ b->key.k.p = pos;
+ b->data->max_key = pos;
+}
+
+static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b)
+{
+ struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
+ struct bkey_format format = bch2_btree_calc_format(b);
+
+ /*
+ * The keys might expand with the new format - if they wouldn't fit in
+ * the btree node anymore, use the old format for now:
+ */
+ if (!bch2_btree_node_format_fits(as->c, b, &format))
+ format = b->format;
+
+ SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
+
+ btree_set_min(n, b->data->min_key);
+ btree_set_max(n, b->data->max_key);
+
+ n->data->format = format;
+ btree_node_set_format(n, format);
+
+ bch2_btree_sort_into(as->c, n, b);
+
+ btree_node_reset_sib_u64s(n);
+ return n;
+}
+
+static struct btree *__btree_root_alloc(struct btree_update *as,
+ struct btree_trans *trans, unsigned level)
+{
+ struct btree *b = bch2_btree_node_alloc(as, trans, level);
+
+ btree_set_min(b, POS_MIN);
+ btree_set_max(b, SPOS_MAX);
+ b->data->format = bch2_btree_calc_format(b);
+
+ btree_node_set_format(b, b->data->format);
+ bch2_btree_build_aux_trees(b);
+
+ return b;
+}
+
+static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans)
+{
+ struct bch_fs *c = as->c;
+ struct prealloc_nodes *p;
+
+ for (p = as->prealloc_nodes;
+ p < as->prealloc_nodes + ARRAY_SIZE(as->prealloc_nodes);
+ p++) {
+ while (p->nr) {
+ struct btree *b = p->b[--p->nr];
+
+ mutex_lock(&c->btree_reserve_cache_lock);
+
+ if (c->btree_reserve_cache_nr <
+ ARRAY_SIZE(c->btree_reserve_cache)) {
+ struct btree_alloc *a =
+ &c->btree_reserve_cache[c->btree_reserve_cache_nr++];
+
+ a->ob = b->ob;
+ b->ob.nr = 0;
+ bkey_copy(&a->k, &b->key);
+ } else {
+ bch2_open_buckets_put(c, &b->ob);
+ }
+
+ mutex_unlock(&c->btree_reserve_cache_lock);
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+ __btree_node_free(c, b);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ }
+ }
+}
+
+static int bch2_btree_reserve_get(struct btree_trans *trans,
+ struct btree_update *as,
+ unsigned nr_nodes[2],
+ unsigned flags,
+ struct closure *cl)
+{
+ struct bch_fs *c = as->c;
+ struct btree *b;
+ unsigned interior;
+ int ret = 0;
+
+ BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX);
+
+ /*
+ * Protects reaping from the btree node cache and using the btree node
+ * open bucket reserve:
+ *
+ * BTREE_INSERT_NOWAIT only applies to btree node allocation, not
+ * blocking on this lock:
+ */
+ ret = bch2_btree_cache_cannibalize_lock(c, cl);
+ if (ret)
+ return ret;
+
+ for (interior = 0; interior < 2; interior++) {
+ struct prealloc_nodes *p = as->prealloc_nodes + interior;
+
+ while (p->nr < nr_nodes[interior]) {
+ b = __bch2_btree_node_alloc(trans, &as->disk_res,
+ flags & BTREE_INSERT_NOWAIT ? NULL : cl,
+ interior, flags);
+ if (IS_ERR(b)) {
+ ret = PTR_ERR(b);
+ goto err;
+ }
+
+ p->b[p->nr++] = b;
+ }
+ }
+err:
+ bch2_btree_cache_cannibalize_unlock(c);
+ return ret;
+}
+
+/* Asynchronous interior node update machinery */
+
+static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans)
+{
+ struct bch_fs *c = as->c;
+
+ if (as->took_gc_lock)
+ up_read(&c->gc_lock);
+ as->took_gc_lock = false;
+
+ bch2_journal_preres_put(&c->journal, &as->journal_preres);
+
+ bch2_journal_pin_drop(&c->journal, &as->journal);
+ bch2_journal_pin_flush(&c->journal, &as->journal);
+ bch2_disk_reservation_put(c, &as->disk_res);
+ bch2_btree_reserve_put(as, trans);
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total],
+ as->start_time);
+
+ mutex_lock(&c->btree_interior_update_lock);
+ list_del(&as->unwritten_list);
+ list_del(&as->list);
+
+ closure_debug_destroy(&as->cl);
+ mempool_free(as, &c->btree_interior_update_pool);
+
+ /*
+ * Have to do the wakeup with btree_interior_update_lock still held,
+ * since being on btree_interior_update_list is our ref on @c:
+ */
+ closure_wake_up(&c->btree_interior_update_wait);
+
+ mutex_unlock(&c->btree_interior_update_lock);
+}
+
+static void btree_update_add_key(struct btree_update *as,
+ struct keylist *keys, struct btree *b)
+{
+ struct bkey_i *k = &b->key;
+
+ BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s >
+ ARRAY_SIZE(as->_old_keys));
+
+ bkey_copy(keys->top, k);
+ bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1;
+
+ bch2_keylist_push(keys);
+}
+
+/*
+ * The transactional part of an interior btree node update, where we journal the
+ * update we did to the interior node and update alloc info:
+ */
+static int btree_update_nodes_written_trans(struct btree_trans *trans,
+ struct btree_update *as)
+{
+ struct bkey_i *k;
+ int ret;
+
+ ret = darray_make_room(&trans->extra_journal_entries, as->journal_u64s);
+ if (ret)
+ return ret;
+
+ memcpy(&darray_top(trans->extra_journal_entries),
+ as->journal_entries,
+ as->journal_u64s * sizeof(u64));
+ trans->extra_journal_entries.nr += as->journal_u64s;
+
+ trans->journal_pin = &as->journal;
+
+ for_each_keylist_key(&as->old_keys, k) {
+ unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
+
+ ret = bch2_trans_mark_old(trans, as->btree_id, level, bkey_i_to_s_c(k), 0);
+ if (ret)
+ return ret;
+ }
+
+ for_each_keylist_key(&as->new_keys, k) {
+ unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
+
+ ret = bch2_trans_mark_new(trans, as->btree_id, level, k, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void btree_update_nodes_written(struct btree_update *as)
+{
+ struct bch_fs *c = as->c;
+ struct btree *b;
+ struct btree_trans *trans = bch2_trans_get(c);
+ u64 journal_seq = 0;
+ unsigned i;
+ int ret;
+
+ /*
+ * If we're already in an error state, it might be because a btree node
+ * was never written, and we might be trying to free that same btree
+ * node here, but it won't have been marked as allocated and we'll see
+ * spurious disk usage inconsistencies in the transactional part below
+ * if we don't skip it:
+ */
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ goto err;
+
+ /*
+ * Wait for any in flight writes to finish before we free the old nodes
+ * on disk:
+ */
+ for (i = 0; i < as->nr_old_nodes; i++) {
+ __le64 seq;
+
+ b = as->old_nodes[i];
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ seq = b->data ? b->data->keys.seq : 0;
+ six_unlock_read(&b->c.lock);
+
+ if (seq == as->old_nodes_seq[i])
+ wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ /*
+ * We did an update to a parent node where the pointers we added pointed
+ * to child nodes that weren't written yet: now, the child nodes have
+ * been written so we can write out the update to the interior node.
+ */
+
+ /*
+ * We can't call into journal reclaim here: we'd block on the journal
+ * reclaim lock, but we may need to release the open buckets we have
+ * pinned in order for other btree updates to make forward progress, and
+ * journal reclaim does btree updates when flushing bkey_cached entries,
+ * which may require allocations as well.
+ */
+ ret = commit_do(trans, &as->disk_res, &journal_seq,
+ BCH_WATERMARK_reclaim|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_JOURNAL_RECLAIM,
+ btree_update_nodes_written_trans(trans, as));
+ bch2_trans_unlock(trans);
+
+ bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
+ "%s(): error %s", __func__, bch2_err_str(ret));
+err:
+ if (as->b) {
+ struct btree_path *path;
+
+ b = as->b;
+ path = get_unlocked_mut_path(trans, as->btree_id, b->c.level, b->key.k.p);
+ /*
+ * @b is the node we did the final insert into:
+ *
+ * On failure to get a journal reservation, we still have to
+ * unblock the write and allow most of the write path to happen
+ * so that shutdown works, but the i->journal_seq mechanism
+ * won't work to prevent the btree write from being visible (we
+ * didn't get a journal sequence number) - instead
+ * __bch2_btree_node_write() doesn't do the actual write if
+ * we're in journal error state:
+ */
+
+ /*
+ * Ensure transaction is unlocked before using
+ * btree_node_lock_nopath() (the use of which is always suspect,
+ * we need to work on removing this in the future)
+ *
+ * It should be, but get_unlocked_mut_path() -> bch2_path_get()
+ * calls bch2_path_upgrade(), before we call path_make_mut(), so
+ * we may rarely end up with a locked path besides the one we
+ * have here:
+ */
+ bch2_trans_unlock(trans);
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
+ path->l[b->c.level].b = b;
+
+ bch2_btree_node_lock_write_nofail(trans, path, &b->c);
+
+ mutex_lock(&c->btree_interior_update_lock);
+
+ list_del(&as->write_blocked_list);
+ if (list_empty(&b->write_blocked))
+ clear_btree_node_write_blocked(b);
+
+ /*
+ * Node might have been freed, recheck under
+ * btree_interior_update_lock:
+ */
+ if (as->b == b) {
+ BUG_ON(!b->c.level);
+ BUG_ON(!btree_node_dirty(b));
+
+ if (!ret) {
+ struct bset *last = btree_bset_last(b);
+
+ last->journal_seq = cpu_to_le64(
+ max(journal_seq,
+ le64_to_cpu(last->journal_seq)));
+
+ bch2_btree_add_journal_pin(c, b, journal_seq);
+ } else {
+ /*
+ * If we didn't get a journal sequence number we
+ * can't write this btree node, because recovery
+ * won't know to ignore this write:
+ */
+ set_btree_node_never_write(b);
+ }
+ }
+
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
+ six_unlock_write(&b->c.lock);
+
+ btree_node_write_if_need(c, b, SIX_LOCK_intent);
+ btree_node_unlock(trans, path, b->c.level);
+ bch2_path_put(trans, path, true);
+ }
+
+ bch2_journal_pin_drop(&c->journal, &as->journal);
+
+ bch2_journal_preres_put(&c->journal, &as->journal_preres);
+
+ mutex_lock(&c->btree_interior_update_lock);
+ for (i = 0; i < as->nr_new_nodes; i++) {
+ b = as->new_nodes[i];
+
+ BUG_ON(b->will_make_reachable != (unsigned long) as);
+ b->will_make_reachable = 0;
+ clear_btree_node_will_make_reachable(b);
+ }
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ for (i = 0; i < as->nr_new_nodes; i++) {
+ b = as->new_nodes[i];
+
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ btree_node_write_if_need(c, b, SIX_LOCK_read);
+ six_unlock_read(&b->c.lock);
+ }
+
+ for (i = 0; i < as->nr_open_buckets; i++)
+ bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);
+
+ bch2_btree_update_free(as, trans);
+ bch2_trans_put(trans);
+}
+
+static void btree_interior_update_work(struct work_struct *work)
+{
+ struct bch_fs *c =
+ container_of(work, struct bch_fs, btree_interior_update_work);
+ struct btree_update *as;
+
+ while (1) {
+ mutex_lock(&c->btree_interior_update_lock);
+ as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
+ struct btree_update, unwritten_list);
+ if (as && !as->nodes_written)
+ as = NULL;
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ if (!as)
+ break;
+
+ btree_update_nodes_written(as);
+ }
+}
+
+static void btree_update_set_nodes_written(struct closure *cl)
+{
+ struct btree_update *as = container_of(cl, struct btree_update, cl);
+ struct bch_fs *c = as->c;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ as->nodes_written = true;
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
+}
+
+/*
+ * We're updating @b with pointers to nodes that haven't finished writing yet:
+ * block @b from being written until @as completes
+ */
+static void btree_update_updated_node(struct btree_update *as, struct btree *b)
+{
+ struct bch_fs *c = as->c;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
+
+ BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(!btree_node_dirty(b));
+ BUG_ON(!b->c.level);
+
+ as->mode = BTREE_INTERIOR_UPDATING_NODE;
+ as->b = b;
+
+ set_btree_node_write_blocked(b);
+ list_add(&as->write_blocked_list, &b->write_blocked);
+
+ mutex_unlock(&c->btree_interior_update_lock);
+}
+
+static void btree_update_reparent(struct btree_update *as,
+ struct btree_update *child)
+{
+ struct bch_fs *c = as->c;
+
+ lockdep_assert_held(&c->btree_interior_update_lock);
+
+ child->b = NULL;
+ child->mode = BTREE_INTERIOR_UPDATING_AS;
+
+ bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, NULL);
+}
+
+static void btree_update_updated_root(struct btree_update *as, struct btree *b)
+{
+ struct bkey_i *insert = &b->key;
+ struct bch_fs *c = as->c;
+
+ BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+
+ BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
+ ARRAY_SIZE(as->journal_entries));
+
+ as->journal_u64s +=
+ journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
+ BCH_JSET_ENTRY_btree_root,
+ b->c.btree_id, b->c.level,
+ insert, insert->k.u64s);
+
+ mutex_lock(&c->btree_interior_update_lock);
+ list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
+
+ as->mode = BTREE_INTERIOR_UPDATING_ROOT;
+ mutex_unlock(&c->btree_interior_update_lock);
+}
+
+/*
+ * bch2_btree_update_add_new_node:
+ *
+ * This causes @as to wait on @b to be written, before it gets to
+ * btree_update_nodes_written()
+ *
+ * Additionally, it sets b->will_make_reachable to prevent any additional writes
+ * to @b from happening besides the first until @b is reachable on disk
+ *
+ * And it adds @b to the list of @as's new nodes, so that we can update sector
+ * counts in btree_update_nodes_written():
+ */
+static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
+{
+ struct bch_fs *c = as->c;
+
+ closure_get(&as->cl);
+
+ mutex_lock(&c->btree_interior_update_lock);
+ BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
+ BUG_ON(b->will_make_reachable);
+
+ as->new_nodes[as->nr_new_nodes++] = b;
+ b->will_make_reachable = 1UL|(unsigned long) as;
+ set_btree_node_will_make_reachable(b);
+
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ btree_update_add_key(as, &as->new_keys, b);
+
+ if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
+ unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data;
+ unsigned sectors = round_up(bytes, block_bytes(c)) >> 9;
+
+ bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+ cpu_to_le16(sectors);
+ }
+}
+
+/*
+ * If @b was a new node, drop it from the list of nodes that the btree_update
+ * which allocated it was going to make reachable:
+ */
+static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
+{
+ struct btree_update *as;
+ unsigned long v;
+ unsigned i;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ /*
+ * When b->will_make_reachable != 0, it owns a ref on as->cl that's
+ * dropped when it gets written by bch2_btree_complete_write - the
+ * xchg() is for synchronization with bch2_btree_complete_write:
+ */
+ v = xchg(&b->will_make_reachable, 0);
+ clear_btree_node_will_make_reachable(b);
+ as = (struct btree_update *) (v & ~1UL);
+
+ if (!as) {
+ mutex_unlock(&c->btree_interior_update_lock);
+ return;
+ }
+
+ for (i = 0; i < as->nr_new_nodes; i++)
+ if (as->new_nodes[i] == b)
+ goto found;
+
+ BUG();
+found:
+ array_remove_item(as->new_nodes, as->nr_new_nodes, i);
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ if (v & 1)
+ closure_put(&as->cl);
+}
+
+static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
+{
+ while (b->ob.nr)
+ as->open_buckets[as->nr_open_buckets++] =
+ b->ob.v[--b->ob.nr];
+}
+
+/*
+ * @b is being split/rewritten: it may have pointers to not-yet-written btree
+ * nodes and thus outstanding btree_updates - redirect @b's
+ * btree_updates to point to this btree_update:
+ */
+static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
+ struct btree *b)
+{
+ struct bch_fs *c = as->c;
+ struct btree_update *p, *n;
+ struct btree_write *w;
+
+ set_btree_node_dying(b);
+
+ if (btree_node_fake(b))
+ return;
+
+ mutex_lock(&c->btree_interior_update_lock);
+
+ /*
+ * Does this node have any btree_update operations preventing
+ * it from being written?
+ *
+ * If so, redirect them to point to this btree_update: we can
+ * write out our new nodes, but we won't make them visible until those
+ * operations complete
+ */
+ list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
+ list_del_init(&p->write_blocked_list);
+ btree_update_reparent(as, p);
+
+ /*
+ * for flush_held_btree_writes() waiting on updates to flush or
+ * nodes to be writeable:
+ */
+ closure_wake_up(&c->btree_interior_update_wait);
+ }
+
+ clear_btree_node_dirty_acct(c, b);
+ clear_btree_node_need_write(b);
+ clear_btree_node_write_blocked(b);
+
+ /*
+ * Does this node have unwritten data that has a pin on the journal?
+ *
+ * If so, transfer that pin to the btree_update operation -
+ * note that if we're freeing multiple nodes, we only need to keep the
+ * oldest pin of any of the nodes we're freeing. We'll release the pin
+ * when the new nodes are persistent and reachable on disk:
+ */
+ w = btree_current_write(b);
+ bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
+ bch2_journal_pin_drop(&c->journal, &w->journal);
+
+ w = btree_prev_write(b);
+ bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, NULL);
+ bch2_journal_pin_drop(&c->journal, &w->journal);
+
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ /*
+ * Is this a node that isn't reachable on disk yet?
+ *
+ * Nodes that aren't reachable yet have writes blocked until they're
+ * reachable - now that we've cancelled any pending writes and moved
+ * things waiting on that write to wait on this update, we can drop this
+ * node from the list of nodes that the other update is making
+ * reachable, prior to freeing it:
+ */
+ btree_update_drop_new_node(c, b);
+
+ btree_update_add_key(as, &as->old_keys, b);
+
+ as->old_nodes[as->nr_old_nodes] = b;
+ as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;
+ as->nr_old_nodes++;
+}
+
+static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans)
+{
+ struct bch_fs *c = as->c;
+ u64 start_time = as->start_time;
+
+ BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
+
+ if (as->took_gc_lock)
+ up_read(&as->c->gc_lock);
+ as->took_gc_lock = false;
+
+ bch2_btree_reserve_put(as, trans);
+
+ continue_at(&as->cl, btree_update_set_nodes_written,
+ as->c->btree_interior_update_worker);
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_foreground],
+ start_time);
+}
+
+static struct btree_update *
+bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
+ unsigned level, bool split, unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_update *as;
+ u64 start_time = local_clock();
+ int disk_res_flags = (flags & BTREE_INSERT_NOFAIL)
+ ? BCH_DISK_RESERVATION_NOFAIL : 0;
+ unsigned nr_nodes[2] = { 0, 0 };
+ unsigned update_level = level;
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+ unsigned journal_flags = 0;
+ int ret = 0;
+ u32 restart_count = trans->restart_count;
+
+ BUG_ON(!path->should_be_locked);
+
+ if (watermark == BCH_WATERMARK_copygc)
+ watermark = BCH_WATERMARK_btree_copygc;
+ if (watermark < BCH_WATERMARK_btree)
+ watermark = BCH_WATERMARK_btree;
+
+ flags &= ~BCH_WATERMARK_MASK;
+ flags |= watermark;
+
+ if (flags & BTREE_INSERT_JOURNAL_RECLAIM)
+ journal_flags |= JOURNAL_RES_GET_NONBLOCK;
+ journal_flags |= watermark;
+
+ while (1) {
+ nr_nodes[!!update_level] += 1 + split;
+ update_level++;
+
+ ret = bch2_btree_path_upgrade(trans, path, update_level + 1);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!btree_path_node(path, update_level)) {
+ /* Allocating new root? */
+ nr_nodes[1] += split;
+ update_level = BTREE_MAX_DEPTH;
+ break;
+ }
+
+ if (bch2_btree_node_insert_fits(c, path->l[update_level].b,
+ BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
+ break;
+
+ split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
+ }
+
+ if (flags & BTREE_INSERT_GC_LOCK_HELD)
+ lockdep_assert_held(&c->gc_lock);
+ else if (!down_read_trylock(&c->gc_lock)) {
+ ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0));
+ if (ret) {
+ up_read(&c->gc_lock);
+ return ERR_PTR(ret);
+ }
+ }
+
+ as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
+ memset(as, 0, sizeof(*as));
+ closure_init(&as->cl, NULL);
+ as->c = c;
+ as->start_time = start_time;
+ as->mode = BTREE_INTERIOR_NO_UPDATE;
+ as->took_gc_lock = !(flags & BTREE_INSERT_GC_LOCK_HELD);
+ as->btree_id = path->btree_id;
+ as->update_level = update_level;
+ INIT_LIST_HEAD(&as->list);
+ INIT_LIST_HEAD(&as->unwritten_list);
+ INIT_LIST_HEAD(&as->write_blocked_list);
+ bch2_keylist_init(&as->old_keys, as->_old_keys);
+ bch2_keylist_init(&as->new_keys, as->_new_keys);
+ bch2_keylist_init(&as->parent_keys, as->inline_keys);
+
+ mutex_lock(&c->btree_interior_update_lock);
+ list_add_tail(&as->list, &c->btree_interior_update_list);
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ /*
+ * We don't want to allocate if we're in an error state, that can cause
+ * deadlock on emergency shutdown due to open buckets getting stuck in
+ * the btree_reserve_cache after allocator shutdown has cleared it out.
+ * This check needs to come after adding us to the btree_interior_update
+ * list but before calling bch2_btree_reserve_get, to synchronize with
+ * __bch2_fs_read_only().
+ */
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ goto err;
+
+ ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
+ BTREE_UPDATE_JOURNAL_RES,
+ journal_flags|JOURNAL_RES_GET_NONBLOCK);
+ if (ret) {
+ if (flags & BTREE_INSERT_JOURNAL_RECLAIM) {
+ ret = -BCH_ERR_journal_reclaim_would_deadlock;
+ goto err;
+ }
+
+ ret = drop_locks_do(trans,
+ bch2_journal_preres_get(&c->journal, &as->journal_preres,
+ BTREE_UPDATE_JOURNAL_RES,
+ journal_flags));
+ if (ret == -BCH_ERR_journal_preres_get_blocked) {
+ trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags);
+ ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
+ }
+ if (ret)
+ goto err;
+ }
+
+ ret = bch2_disk_reservation_get(c, &as->disk_res,
+ (nr_nodes[0] + nr_nodes[1]) * btree_sectors(c),
+ c->opts.metadata_replicas,
+ disk_res_flags);
+ if (ret)
+ goto err;
+
+ ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL);
+ if (bch2_err_matches(ret, ENOSPC) ||
+ bch2_err_matches(ret, ENOMEM)) {
+ struct closure cl;
+
+ /*
+ * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
+ * flag
+ */
+ if (bch2_err_matches(ret, ENOSPC) &&
+ (flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
+ watermark != BCH_WATERMARK_reclaim) {
+ ret = -BCH_ERR_journal_reclaim_would_deadlock;
+ goto err;
+ }
+
+ closure_init_stack(&cl);
+
+ do {
+ ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl);
+
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ } while (bch2_err_matches(ret, BCH_ERR_operation_blocked));
+ }
+
+ if (ret) {
+ trace_and_count(c, btree_reserve_get_fail, trans->fn,
+ _RET_IP_, nr_nodes[0] + nr_nodes[1], ret);
+ goto err;
+ }
+
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ goto err;
+
+ bch2_trans_verify_not_restarted(trans, restart_count);
+ return as;
+err:
+ bch2_btree_update_free(as, trans);
+ return ERR_PTR(ret);
+}
+
+/* Btree root updates: */
+
+static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
+{
+ /* Root nodes cannot be reaped */
+ mutex_lock(&c->btree_cache.lock);
+ list_del_init(&b->list);
+ mutex_unlock(&c->btree_cache.lock);
+
+ mutex_lock(&c->btree_root_lock);
+ BUG_ON(btree_node_root(c, b) &&
+ (b->c.level < btree_node_root(c, b)->c.level ||
+ !btree_node_dying(btree_node_root(c, b))));
+
+ bch2_btree_id_root(c, b->c.btree_id)->b = b;
+ mutex_unlock(&c->btree_root_lock);
+
+ bch2_recalc_btree_reserve(c);
+}
+
+static void bch2_btree_set_root(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b)
+{
+ struct bch_fs *c = as->c;
+ struct btree *old;
+
+ trace_and_count(c, btree_node_set_root, c, b);
+
+ old = btree_node_root(c, b);
+
+ /*
+ * Ensure no one is using the old root while we switch to the
+ * new root:
+ */
+ bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+
+ bch2_btree_set_root_inmem(c, b);
+
+ btree_update_updated_root(as, b);
+
+ /*
+ * Unlock old root after new root is visible:
+ *
+ * The new root isn't persistent, but that's ok: we still have
+ * an intent lock on the new root, and any updates that would
+ * depend on the new root would have to update the new root.
+ */
+ bch2_btree_node_unlock_write(trans, path, old);
+}
+
+/* Interior node updates: */
+
+static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter *node_iter,
+ struct bkey_i *insert)
+{
+ struct bch_fs *c = as->c;
+ struct bkey_packed *k;
+ struct printbuf buf = PRINTBUF;
+ unsigned long old, new, v;
+
+ BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
+ !btree_ptr_sectors_written(insert));
+
+ if (unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)))
+ bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
+
+ if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
+ btree_node_type(b), WRITE, &buf) ?:
+ bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf)) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "inserting invalid bkey\n ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
+ prt_printf(&buf, "\n ");
+ bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
+ btree_node_type(b), WRITE, &buf);
+ bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf);
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+ dump_stack();
+ }
+
+ BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
+ ARRAY_SIZE(as->journal_entries));
+
+ as->journal_u64s +=
+ journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
+ BCH_JSET_ENTRY_btree_keys,
+ b->c.btree_id, b->c.level,
+ insert, insert->k.u64s);
+
+ while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
+ bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
+ bch2_btree_node_iter_advance(node_iter, b);
+
+ bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
+ set_btree_node_dirty_acct(c, b);
+
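+	/*
+	 * Set the write type and need_write flag atomically - other threads
+	 * may be updating b->flags concurrently, hence the cmpxchg loop:
+	 */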
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
+
+ new &= ~BTREE_WRITE_TYPE_MASK;
+ new |= BTREE_WRITE_interior;
+ new |= 1 << BTREE_NODE_need_write;
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+ printbuf_exit(&buf);
+}
+
+static void
+__bch2_btree_insert_keys_interior(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter node_iter,
+ struct keylist *keys)
+{
+ struct bkey_i *insert = bch2_keylist_front(keys);
+ struct bkey_packed *k;
+
+ BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);
+
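+	/*
+	 * Back the node iterator up so it points just before the position of
+	 * the first key we're inserting:
+	 */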
+ while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
+ (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))
+ ;
+
+ while (!bch2_keylist_empty(keys)) {
+ insert = bch2_keylist_front(keys);
+
+ if (bpos_gt(insert->k.p, b->key.k.p))
+ break;
+
+ bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert);
+ bch2_keylist_pop_front(keys);
+ }
+}
+
+/*
+ * Distribute the keys from @b between the two new nodes: n[0] gets the lower
+ * keys, n[1] the higher
+ */
+static void __btree_split_node(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree *b,
+ struct btree *n[2])
+{
+ struct bkey_packed *k;
+ struct bpos n1_pos = POS_MIN;
+ struct btree_node_iter iter;
+ struct bset *bsets[2];
+ struct bkey_format_state format[2];
+ struct bkey_packed *out[2];
+ struct bkey uk;
+ unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ BUG_ON(n[i]->nsets != 1);
+
+ bsets[i] = btree_bset_first(n[i]);
+ out[i] = bsets[i]->start;
+
+ SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
+ bch2_bkey_format_init(&format[i]);
+ }
+
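+	/*
+	 * First pass: pick the pivot (roughly 3/5 of the live u64s go to the
+	 * lower node) and accumulate the key format for each new node:
+	 */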
+ u64s = 0;
+ for_each_btree_node_key(b, k, &iter) {
+ if (bkey_deleted(k))
+ continue;
+
+ i = u64s >= n1_u64s;
+ u64s += k->u64s;
+ uk = bkey_unpack_key(b, k);
+ if (!i)
+ n1_pos = uk.p;
+ bch2_bkey_format_add_key(&format[i], &uk);
+ }
+
+ btree_set_min(n[0], b->data->min_key);
+ btree_set_max(n[0], n1_pos);
+ btree_set_min(n[1], bpos_successor(n1_pos));
+ btree_set_max(n[1], b->data->max_key);
+
+ for (i = 0; i < 2; i++) {
+ bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key);
+ bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
+
+ n[i]->data->format = bch2_bkey_format_done(&format[i]);
+ btree_node_set_format(n[i], n[i]->data->format);
+ }
+
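+	/*
+	 * Second pass: copy the keys into the new nodes, repacking into the
+	 * destination format where possible and unpacking where not:
+	 */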
+ u64s = 0;
+ for_each_btree_node_key(b, k, &iter) {
+ if (bkey_deleted(k))
+ continue;
+
+ i = u64s >= n1_u64s;
+ u64s += k->u64s;
+
+ if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k)
+ ? &b->format: &bch2_bkey_format_current, k))
+ out[i]->format = KEY_FORMAT_LOCAL_BTREE;
+ else
+ bch2_bkey_unpack(b, (void *) out[i], k);
+
+ out[i]->needs_whiteout = false;
+
+ btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
+ out[i] = bkey_p_next(out[i]);
+ }
+
+ for (i = 0; i < 2; i++) {
+ bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data);
+
+ BUG_ON(!bsets[i]->u64s);
+
+ set_btree_bset_end(n[i], n[i]->set);
+
+ btree_node_reset_sib_u64s(n[i]);
+
+ bch2_verify_btree_nr_keys(n[i]);
+
+ if (b->c.level)
+ btree_node_interior_verify(as->c, n[i]);
+ }
+}
+
+/*
+ * For updates to interior nodes, we've got to do the insert before we split
+ * because the stuff we're inserting has to be inserted atomically. Post split,
+ * the keys might have to go in different nodes and the split would no longer be
+ * atomic.
+ *
+ * Worse, if the insert comes from btree node coalescing and we do it after the
+ * split (and after picking the pivot), the pivot we pick might fall between
+ * nodes that were coalesced - i.e. in the middle of a child node post
+ * coalescing:
+ */
+static void btree_split_insert_keys(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct keylist *keys)
+{
+ if (!bch2_keylist_empty(keys) &&
+ bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
+ struct btree_node_iter node_iter;
+
+ bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
+
+ __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+
+ btree_node_interior_verify(as->c, b);
+ }
+}
+
+static int btree_split(struct btree_update *as, struct btree_trans *trans,
+ struct btree_path *path, struct btree *b,
+ struct keylist *keys, unsigned flags)
+{
+ struct bch_fs *c = as->c;
+ struct btree *parent = btree_node_parent(path, b);
+ struct btree *n1, *n2 = NULL, *n3 = NULL;
+ struct btree_path *path1 = NULL, *path2 = NULL;
+ u64 start_time = local_clock();
+ int ret = 0;
+
+ BUG_ON(!parent && (b != btree_node_root(c, b)));
+ BUG_ON(parent && !btree_node_intent_locked(path, b->c.level + 1));
+
+ bch2_btree_interior_update_will_free_node(as, b);
+
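+	/*
+	 * If the node is over the split threshold, split it into two new nodes
+	 * (allocating a new root if it was the root); otherwise compact it
+	 * into a single replacement node:
+	 */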
+ if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
+ struct btree *n[2];
+
+ trace_and_count(c, btree_node_split, c, b);
+
+ n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
+ n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);
+
+ __btree_split_node(as, trans, b, n);
+
+ if (keys) {
+ btree_split_insert_keys(as, trans, path, n1, keys);
+ btree_split_insert_keys(as, trans, path, n2, keys);
+ BUG_ON(!bch2_keylist_empty(keys));
+ }
+
+ bch2_btree_build_aux_trees(n2);
+ bch2_btree_build_aux_trees(n1);
+
+ bch2_btree_update_add_new_node(as, n1);
+ bch2_btree_update_add_new_node(as, n2);
+ six_unlock_write(&n2->c.lock);
+ six_unlock_write(&n1->c.lock);
+
+ path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p);
+ six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
+ bch2_btree_path_level_init(trans, path1, n1);
+
+ path2 = get_unlocked_mut_path(trans, path->btree_id, n2->c.level, n2->key.k.p);
+ six_lock_increment(&n2->c.lock, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path2, n2->c.level, BTREE_NODE_INTENT_LOCKED);
+ bch2_btree_path_level_init(trans, path2, n2);
+
+ /*
+		 * Note that on recursive splits, parent_keys == keys, so we
+ * can't start adding new keys to parent_keys before emptying it
+ * out (which we did with btree_split_insert_keys() above)
+ */
+ bch2_keylist_add(&as->parent_keys, &n1->key);
+ bch2_keylist_add(&as->parent_keys, &n2->key);
+
+ if (!parent) {
+ /* Depth increases, make a new root */
+ n3 = __btree_root_alloc(as, trans, b->c.level + 1);
+
+ bch2_btree_update_add_new_node(as, n3);
+ six_unlock_write(&n3->c.lock);
+
+ path2->locks_want++;
+ BUG_ON(btree_node_locked(path2, n3->c.level));
+ six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path2, n3->c.level, BTREE_NODE_INTENT_LOCKED);
+ bch2_btree_path_level_init(trans, path2, n3);
+
+ n3->sib_u64s[0] = U16_MAX;
+ n3->sib_u64s[1] = U16_MAX;
+
+ btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
+ }
+ } else {
+ trace_and_count(c, btree_node_compact, c, b);
+
+ n1 = bch2_btree_node_alloc_replacement(as, trans, b);
+
+ if (keys) {
+ btree_split_insert_keys(as, trans, path, n1, keys);
+ BUG_ON(!bch2_keylist_empty(keys));
+ }
+
+ bch2_btree_build_aux_trees(n1);
+ bch2_btree_update_add_new_node(as, n1);
+ six_unlock_write(&n1->c.lock);
+
+ path1 = get_unlocked_mut_path(trans, path->btree_id, n1->c.level, n1->key.k.p);
+ six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
+ bch2_btree_path_level_init(trans, path1, n1);
+
+ if (parent)
+ bch2_keylist_add(&as->parent_keys, &n1->key);
+ }
+
+ /* New nodes all written, now make them visible: */
+
+ if (parent) {
+ /* Split a non root node */
+ ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+ if (ret)
+ goto err;
+ } else if (n3) {
+ bch2_btree_set_root(as, trans, path, n3);
+ } else {
+ /* Root filled up but didn't need to be split */
+ bch2_btree_set_root(as, trans, path, n1);
+ }
+
+ if (n3) {
+ bch2_btree_update_get_open_buckets(as, n3);
+ bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
+ }
+ if (n2) {
+ bch2_btree_update_get_open_buckets(as, n2);
+ bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
+ }
+ bch2_btree_update_get_open_buckets(as, n1);
+ bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);
+
+ /*
+ * The old node must be freed (in memory) _before_ unlocking the new
+ * nodes - else another thread could re-acquire a read lock on the old
+ * node after another thread has locked and updated the new node, thus
+ * seeing stale data:
+ */
+ bch2_btree_node_free_inmem(trans, path, b);
+
+ if (n3)
+ bch2_trans_node_add(trans, n3);
+ if (n2)
+ bch2_trans_node_add(trans, n2);
+ bch2_trans_node_add(trans, n1);
+
+ if (n3)
+ six_unlock_intent(&n3->c.lock);
+ if (n2)
+ six_unlock_intent(&n2->c.lock);
+ six_unlock_intent(&n1->c.lock);
+out:
+ if (path2) {
+ __bch2_btree_path_unlock(trans, path2);
+ bch2_path_put(trans, path2, true);
+ }
+ if (path1) {
+ __bch2_btree_path_unlock(trans, path1);
+ bch2_path_put(trans, path1, true);
+ }
+
+ bch2_trans_verify_locks(trans);
+
+ bch2_time_stats_update(&c->times[n2
+ ? BCH_TIME_btree_node_split
+ : BCH_TIME_btree_node_compact],
+ start_time);
+ return ret;
+err:
+ if (n3)
+ bch2_btree_node_free_never_used(as, trans, n3);
+ if (n2)
+ bch2_btree_node_free_never_used(as, trans, n2);
+ bch2_btree_node_free_never_used(as, trans, n1);
+ goto out;
+}
+
+static void
+bch2_btree_insert_keys_interior(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct keylist *keys)
+{
+ struct btree_path *linked;
+
+ __bch2_btree_insert_keys_interior(as, trans, path, b,
+ path->l[b->c.level].iter, keys);
+
+ btree_update_updated_node(as, b);
+
+ trans_for_each_path_with_node(trans, b, linked)
+ bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
+
+ bch2_trans_verify_paths(trans);
+}
+
+/**
+ * bch2_btree_insert_node - insert bkeys into a given btree node
+ *
+ * @as: btree_update object
+ * @trans: btree_trans object
+ * @path: path that points to current node
+ * @b: node to insert keys into
+ * @keys: list of keys to insert
+ * @flags: transaction commit flags
+ *
+ * Returns: 0 on success, typically transaction restart error on failure
+ *
+ * Inserts as many keys as it can into a given btree node, splitting it if full.
+ * If a split occurred, this function will return early. This can only happen
+ * for leaf nodes -- inserts into interior nodes have to be atomic.
+ */
+static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
+ struct btree_path *path, struct btree *b,
+ struct keylist *keys, unsigned flags)
+{
+ struct bch_fs *c = as->c;
+ int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
+ int old_live_u64s = b->nr.live_u64s;
+ int live_u64s_added, u64s_added;
+ int ret;
+
+ lockdep_assert_held(&c->gc_lock);
+ BUG_ON(!btree_node_intent_locked(path, b->c.level));
+ BUG_ON(!b->c.level);
+ BUG_ON(!as || as->b);
+ bch2_verify_keylist_sorted(keys);
+
+ ret = bch2_btree_node_lock_write(trans, path, &b->c);
+ if (ret)
+ return ret;
+
+ bch2_btree_node_prep_for_write(trans, path, b);
+
+ if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
+ bch2_btree_node_unlock_write(trans, path, b);
+ goto split;
+ }
+
+ btree_node_interior_verify(c, b);
+
+ bch2_btree_insert_keys_interior(as, trans, path, b, keys);
+
+ live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
+ u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
+
+ if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
+ b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
+ if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
+ b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
+
+ if (u64s_added > live_u64s_added &&
+ bch2_maybe_compact_whiteouts(c, b))
+ bch2_trans_node_reinit_iter(trans, b);
+
+ bch2_btree_node_unlock_write(trans, path, b);
+
+ btree_node_interior_verify(c, b);
+ return 0;
+split:
+ /*
+ * We could attempt to avoid the transaction restart, by calling
+ * bch2_btree_path_upgrade() and allocating more nodes:
+ */
+ if (b->c.level >= as->update_level) {
+ trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
+ }
+
+ return btree_split(as, trans, path, b, keys, flags);
+}
+
+int bch2_btree_split_leaf(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned flags)
+{
+ struct btree *b = path_l(path)->b;
+ struct btree_update *as;
+ unsigned l;
+ int ret = 0;
+
+ as = bch2_btree_update_start(trans, path, path->level,
+ true, flags);
+ if (IS_ERR(as))
+ return PTR_ERR(as);
+
+ ret = btree_split(as, trans, path, b, NULL, flags);
+ if (ret) {
+ bch2_btree_update_free(as, trans);
+ return ret;
+ }
+
+ bch2_btree_update_done(as, trans);
+
+ for (l = path->level + 1; btree_node_intent_locked(path, l) && !ret; l++)
+ ret = bch2_foreground_maybe_merge(trans, path, l, flags);
+
+ return ret;
+}
+
+int __bch2_foreground_maybe_merge(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned level,
+ unsigned flags,
+ enum btree_node_sibling sib)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path *sib_path = NULL, *new_path = NULL;
+ struct btree_update *as;
+ struct bkey_format_state new_s;
+ struct bkey_format new_f;
+ struct bkey_i delete;
+ struct btree *b, *m, *n, *prev, *next, *parent;
+ struct bpos sib_pos;
+ size_t sib_u64s;
+ u64 start_time = local_clock();
+ int ret = 0;
+
+ BUG_ON(!path->should_be_locked);
+ BUG_ON(!btree_node_locked(path, level));
+
+ b = path->l[level].b;
+
+ if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
+ (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
+ b->sib_u64s[sib] = U16_MAX;
+ return 0;
+ }
+
+ sib_pos = sib == btree_prev_sib
+ ? bpos_predecessor(b->data->min_key)
+ : bpos_successor(b->data->max_key);
+
+ sib_path = bch2_path_get(trans, path->btree_id, sib_pos,
+ U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
+ ret = bch2_btree_path_traverse(trans, sib_path, false);
+ if (ret)
+ goto err;
+
+ btree_path_set_should_be_locked(sib_path);
+
+ m = sib_path->l[level].b;
+
+ if (btree_node_parent(path, b) !=
+ btree_node_parent(sib_path, m)) {
+ b->sib_u64s[sib] = U16_MAX;
+ goto out;
+ }
+
+ if (sib == btree_prev_sib) {
+ prev = m;
+ next = b;
+ } else {
+ prev = b;
+ next = m;
+ }
+
+ if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+
+ bch2_bpos_to_text(&buf1, prev->data->max_key);
+ bch2_bpos_to_text(&buf2, next->data->min_key);
+ bch_err(c,
+ "%s(): btree topology error:\n"
+ " prev ends at %s\n"
+ " next starts at %s",
+ __func__, buf1.buf, buf2.buf);
+ printbuf_exit(&buf1);
+ printbuf_exit(&buf2);
+ bch2_topology_error(c);
+ ret = -EIO;
+ goto err;
+ }
+
+ bch2_bkey_format_init(&new_s);
+ bch2_bkey_format_add_pos(&new_s, prev->data->min_key);
+ __bch2_btree_calc_format(&new_s, prev);
+ __bch2_btree_calc_format(&new_s, next);
+ bch2_bkey_format_add_pos(&new_s, next->data->max_key);
+ new_f = bch2_bkey_format_done(&new_s);
+
+ sib_u64s = btree_node_u64s_with_format(b, &new_f) +
+ btree_node_u64s_with_format(m, &new_f);
+
+ if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
+ sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
+ sib_u64s /= 2;
+ sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
+ }
+
+ sib_u64s = min(sib_u64s, btree_max_u64s(c));
+ sib_u64s = min(sib_u64s, (size_t) U16_MAX - 1);
+ b->sib_u64s[sib] = sib_u64s;
+
+ if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
+ goto out;
+
+ parent = btree_node_parent(path, b);
+ as = bch2_btree_update_start(trans, path, level, false,
+ BTREE_INSERT_NOFAIL|flags);
+ ret = PTR_ERR_OR_ZERO(as);
+ if (ret)
+ goto err;
+
+ trace_and_count(c, btree_node_merge, c, b);
+
+ bch2_btree_interior_update_will_free_node(as, b);
+ bch2_btree_interior_update_will_free_node(as, m);
+
+ n = bch2_btree_node_alloc(as, trans, b->c.level);
+
+ SET_BTREE_NODE_SEQ(n->data,
+ max(BTREE_NODE_SEQ(b->data),
+ BTREE_NODE_SEQ(m->data)) + 1);
+
+ btree_set_min(n, prev->data->min_key);
+ btree_set_max(n, next->data->max_key);
+
+ n->data->format = new_f;
+ btree_node_set_format(n, new_f);
+
+ bch2_btree_sort_into(c, n, prev);
+ bch2_btree_sort_into(c, n, next);
+
+ bch2_btree_build_aux_trees(n);
+ bch2_btree_update_add_new_node(as, n);
+ six_unlock_write(&n->c.lock);
+
+ new_path = get_unlocked_mut_path(trans, path->btree_id, n->c.level, n->key.k.p);
+ six_lock_increment(&n->c.lock, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
+ bch2_btree_path_level_init(trans, new_path, n);
+
+ bkey_init(&delete.k);
+ delete.k.p = prev->key.k.p;
+ bch2_keylist_add(&as->parent_keys, &delete);
+ bch2_keylist_add(&as->parent_keys, &n->key);
+
+ bch2_trans_verify_paths(trans);
+
+ ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys, flags);
+ if (ret)
+ goto err_free_update;
+
+ bch2_trans_verify_paths(trans);
+
+ bch2_btree_update_get_open_buckets(as, n);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
+
+ bch2_btree_node_free_inmem(trans, path, b);
+ bch2_btree_node_free_inmem(trans, sib_path, m);
+
+ bch2_trans_node_add(trans, n);
+
+ bch2_trans_verify_paths(trans);
+
+ six_unlock_intent(&n->c.lock);
+
+ bch2_btree_update_done(as, trans);
+
+ bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);
+out:
+err:
+ if (new_path)
+ bch2_path_put(trans, new_path, true);
+ bch2_path_put(trans, sib_path, true);
+ bch2_trans_verify_locks(trans);
+ return ret;
+err_free_update:
+ bch2_btree_node_free_never_used(as, trans, n);
+ bch2_btree_update_free(as, trans);
+ goto out;
+}
+
+int bch2_btree_node_rewrite(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path *new_path = NULL;
+ struct btree *n, *parent;
+ struct btree_update *as;
+ int ret;
+
+ flags |= BTREE_INSERT_NOFAIL;
+
+ parent = btree_node_parent(iter->path, b);
+ as = bch2_btree_update_start(trans, iter->path, b->c.level,
+ false, flags);
+ ret = PTR_ERR_OR_ZERO(as);
+ if (ret)
+ goto out;
+
+ bch2_btree_interior_update_will_free_node(as, b);
+
+ n = bch2_btree_node_alloc_replacement(as, trans, b);
+
+ bch2_btree_build_aux_trees(n);
+ bch2_btree_update_add_new_node(as, n);
+ six_unlock_write(&n->c.lock);
+
+ new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
+ six_lock_increment(&n->c.lock, SIX_LOCK_intent);
+ mark_btree_node_locked(trans, new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
+ bch2_btree_path_level_init(trans, new_path, n);
+
+ trace_and_count(c, btree_node_rewrite, c, b);
+
+ if (parent) {
+ bch2_keylist_add(&as->parent_keys, &n->key);
+ ret = bch2_btree_insert_node(as, trans, iter->path, parent,
+ &as->parent_keys, flags);
+ if (ret)
+ goto err;
+ } else {
+ bch2_btree_set_root(as, trans, iter->path, n);
+ }
+
+ bch2_btree_update_get_open_buckets(as, n);
+ bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
+
+ bch2_btree_node_free_inmem(trans, iter->path, b);
+
+ bch2_trans_node_add(trans, n);
+ six_unlock_intent(&n->c.lock);
+
+ bch2_btree_update_done(as, trans);
+out:
+ if (new_path)
+ bch2_path_put(trans, new_path, true);
+ bch2_btree_path_downgrade(trans, iter->path);
+ return ret;
+err:
+ bch2_btree_node_free_never_used(as, trans, n);
+ bch2_btree_update_free(as, trans);
+ goto out;
+}
+
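+/*
+ * A deferred btree node rewrite: we record enough to re-find the node (btree
+ * id, level, pos) plus its seq, so the work item can verify it's still
+ * operating on the node it was queued for.
+ */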
+struct async_btree_rewrite {
+ struct bch_fs *c;
+ struct work_struct work;
+ struct list_head list;
+ enum btree_id btree_id;
+ unsigned level;
+ struct bpos pos;
+ __le64 seq;
+};
+
+static int async_btree_node_rewrite_trans(struct btree_trans *trans,
+ struct async_btree_rewrite *a)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct btree *b;
+ int ret;
+
+ bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos,
+ BTREE_MAX_DEPTH, a->level, 0);
+ b = bch2_btree_iter_peek_node(&iter);
+ ret = PTR_ERR_OR_ZERO(b);
+ if (ret)
+ goto out;
+
+ if (!b || b->data->keys.seq != a->seq) {
+ struct printbuf buf = PRINTBUF;
+
+ if (b)
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ else
+ prt_str(&buf, "(null");
+ bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s",
+ __func__, a->seq, buf.buf);
+ printbuf_exit(&buf);
+ goto out;
+ }
+
+ ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+static void async_btree_node_rewrite_work(struct work_struct *work)
+{
+ struct async_btree_rewrite *a =
+ container_of(work, struct async_btree_rewrite, work);
+ struct bch_fs *c = a->c;
+ int ret;
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ async_btree_node_rewrite_trans(trans, a));
+ if (ret)
+ bch_err_fn(c, ret);
+ bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
+ kfree(a);
+}
+
+void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
+{
+ struct async_btree_rewrite *a;
+ int ret;
+
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (!a) {
+ bch_err(c, "%s: error allocating memory", __func__);
+ return;
+ }
+
+ a->c = c;
+ a->btree_id = b->c.btree_id;
+ a->level = b->c.level;
+ a->pos = b->key.k.p;
+ a->seq = b->data->keys.seq;
+ INIT_WORK(&a->work, async_btree_node_rewrite_work);
+
+ if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_add(&a->list, &c->pending_node_rewrites);
+ mutex_unlock(&c->pending_node_rewrites_lock);
+ return;
+ }
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+ if (test_bit(BCH_FS_STARTED, &c->flags)) {
+ bch_err(c, "%s: error getting c->writes ref", __func__);
+ kfree(a);
+ return;
+ }
+
+ ret = bch2_fs_read_write_early(c);
+ if (ret) {
+ bch_err_msg(c, ret, "going read-write");
+ kfree(a);
+ return;
+ }
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+ }
+
+ queue_work(c->btree_interior_update_worker, &a->work);
+}
+
+void bch2_do_pending_node_rewrites(struct bch_fs *c)
+{
+ struct async_btree_rewrite *a, *n;
+
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+ list_del(&a->list);
+
+ bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
+ queue_work(c->btree_interior_update_worker, &a->work);
+ }
+ mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
+void bch2_free_pending_node_rewrites(struct bch_fs *c)
+{
+ struct async_btree_rewrite *a, *n;
+
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
+ list_del(&a->list);
+
+ kfree(a);
+ }
+ mutex_unlock(&c->pending_node_rewrites_lock);
+}
+
+static int __bch2_btree_node_update_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree *b, struct btree *new_hash,
+ struct bkey_i *new_key,
+ unsigned commit_flags,
+ bool skip_triggers)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter2 = { NULL };
+ struct btree *parent;
+ int ret;
+
+ if (!skip_triggers) {
+ ret = bch2_trans_mark_old(trans, b->c.btree_id, b->c.level + 1,
+ bkey_i_to_s_c(&b->key), 0);
+ if (ret)
+ return ret;
+
+ ret = bch2_trans_mark_new(trans, b->c.btree_id, b->c.level + 1,
+ new_key, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (new_hash) {
+ bkey_copy(&new_hash->key, new_key);
+ ret = bch2_btree_node_hash_insert(&c->btree_cache,
+ new_hash, b->c.level, b->c.btree_id);
+ BUG_ON(ret);
+ }
+
+ parent = btree_node_parent(iter->path, b);
+ if (parent) {
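+		/*
+		 * Update the pointer to @b in the parent: copy the iterator,
+		 * then bump the copy's path up a level so the update lands in
+		 * the parent node:
+		 */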
+ bch2_trans_copy_iter(&iter2, iter);
+
+ iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
+ iter2.flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
+
+ BUG_ON(iter2.path->level != b->c.level);
+ BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
+
+ btree_path_set_level_up(trans, iter2.path);
+
+ trans->paths_sorted = false;
+
+ ret = bch2_btree_iter_traverse(&iter2) ?:
+ bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);
+ if (ret)
+ goto err;
+ } else {
+ BUG_ON(btree_node_root(c, b) != b);
+
+ ret = darray_make_room(&trans->extra_journal_entries,
+ jset_u64s(new_key->k.u64s));
+ if (ret)
+ return ret;
+
+ journal_entry_set((void *) &darray_top(trans->extra_journal_entries),
+ BCH_JSET_ENTRY_btree_root,
+ b->c.btree_id, b->c.level,
+ new_key, new_key->k.u64s);
+ trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s);
+ }
+
+ ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
+ if (ret)
+ goto err;
+
+ bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c);
+
+ if (new_hash) {
+ mutex_lock(&c->btree_cache.lock);
+ bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, new_key);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+ mutex_unlock(&c->btree_cache.lock);
+ } else {
+ bkey_copy(&b->key, new_key);
+ }
+
+ bch2_btree_node_unlock_write(trans, iter->path, b);
+out:
+ bch2_trans_iter_exit(trans, &iter2);
+ return ret;
+err:
+ if (new_hash) {
+ mutex_lock(&c->btree_cache.lock);
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_unlock(&c->btree_cache.lock);
+ }
+ goto out;
+}
+
+int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
+ struct btree *b, struct bkey_i *new_key,
+ unsigned commit_flags, bool skip_triggers)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *new_hash = NULL;
+ struct btree_path *path = iter->path;
+ struct closure cl;
+ int ret = 0;
+
+ ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1);
+ if (ret)
+ return ret;
+
+ closure_init_stack(&cl);
+
+ /*
+ * check btree_ptr_hash_val() after @b is locked by
+ * btree_iter_traverse():
+ */
+ if (btree_ptr_hash_val(new_key) != b->hash_val) {
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+ if (ret) {
+ ret = drop_locks_do(trans, (closure_sync(&cl), 0));
+ if (ret)
+ return ret;
+ }
+
+ new_hash = bch2_btree_node_mem_alloc(trans, false);
+ }
+
+ path->intent_ref++;
+ ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
+ commit_flags, skip_triggers);
+ --path->intent_ref;
+
+ if (new_hash) {
+ mutex_lock(&c->btree_cache.lock);
+ list_move(&new_hash->list, &c->btree_cache.freeable);
+ mutex_unlock(&c->btree_cache.lock);
+
+ six_unlock_write(&new_hash->c.lock);
+ six_unlock_intent(&new_hash->c.lock);
+ }
+ closure_sync(&cl);
+ bch2_btree_cache_cannibalize_unlock(c);
+ return ret;
+}
+
+int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
+ struct btree *b, struct bkey_i *new_key,
+ unsigned commit_flags, bool skip_triggers)
+{
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
+ BTREE_MAX_DEPTH, b->c.level,
+ BTREE_ITER_INTENT);
+ ret = bch2_btree_iter_traverse(&iter);
+ if (ret)
+ goto out;
+
+	/* has node been freed? */
+	if (iter.path->l[b->c.level].b != b) {
+ BUG_ON(!btree_node_dying(b));
+ goto out;
+ }
+
+ BUG_ON(!btree_node_hashed(b));
+
+ ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
+ commit_flags, skip_triggers);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/* Init code: */
+
+/*
+ * Only for filesystem bringup, when first reading the btree roots or allocating
+ * btree roots when initializing a new filesystem:
+ */
+void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
+{
+ BUG_ON(btree_node_root(c, b));
+
+ bch2_btree_set_root_inmem(c, b);
+}
+
+static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
+{
+ struct bch_fs *c = trans->c;
+ struct closure cl;
+ struct btree *b;
+ int ret;
+
+ closure_init_stack(&cl);
+
+ do {
+ ret = bch2_btree_cache_cannibalize_lock(c, &cl);
+ closure_sync(&cl);
+ } while (ret);
+
+ b = bch2_btree_node_mem_alloc(trans, false);
+ bch2_btree_cache_cannibalize_unlock(c);
+
+ set_btree_node_fake(b);
+ set_btree_node_need_rewrite(b);
+ b->c.level = 0;
+ b->c.btree_id = id;
+
+ bkey_btree_ptr_init(&b->key);
+ b->key.k.p = SPOS_MAX;
+ *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;
+
+ bch2_bset_init_first(b, &b->data->keys);
+ bch2_btree_build_aux_trees(b);
+
+ b->data->flags = 0;
+ btree_set_min(b, POS_MIN);
+ btree_set_max(b, SPOS_MAX);
+ b->data->format = bch2_btree_calc_format(b);
+ btree_node_set_format(b, b->data->format);
+
+ ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
+ b->c.level, b->c.btree_id);
+ BUG_ON(ret);
+
+ bch2_btree_set_root_inmem(c, b);
+
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ return 0;
+}
+
+void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+{
+ bch2_trans_run(c, __bch2_btree_root_alloc(trans, id));
+}
+
+void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct btree_update *as;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ list_for_each_entry(as, &c->btree_interior_update_list, list)
+ prt_printf(out, "%p m %u w %u r %u j %llu\n",
+ as,
+ as->mode,
+ as->nodes_written,
+ closure_nr_remaining(&as->cl),
+ as->journal.seq);
+ mutex_unlock(&c->btree_interior_update_lock);
+}
+
+static bool bch2_btree_interior_updates_pending(struct bch_fs *c)
+{
+ bool ret;
+
+ mutex_lock(&c->btree_interior_update_lock);
+ ret = !list_empty(&c->btree_interior_update_list);
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ return ret;
+}
+
+bool bch2_btree_interior_updates_flush(struct bch_fs *c)
+{
+ bool ret = bch2_btree_interior_updates_pending(c);
+
+ if (ret)
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_pending(c));
+ return ret;
+}
+
+void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
+{
+ struct btree_root *r = bch2_btree_id_root(c, entry->btree_id);
+
+ mutex_lock(&c->btree_root_lock);
+
+ r->level = entry->level;
+ r->alive = true;
+ bkey_copy(&r->key, &entry->start[0]);
+
+ mutex_unlock(&c->btree_root_lock);
+}
+
+struct jset_entry *
+bch2_btree_roots_to_journal_entries(struct bch_fs *c,
+ struct jset_entry *start,
+ struct jset_entry *end)
+{
+ struct jset_entry *entry;
+ unsigned long have = 0;
+ unsigned i;
+
+ for (entry = start; entry < end; entry = vstruct_next(entry))
+ if (entry->type == BCH_JSET_ENTRY_btree_root)
+ __set_bit(entry->btree_id, &have);
+
+ mutex_lock(&c->btree_root_lock);
+
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (r->alive && !test_bit(i, &have)) {
+ journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
+ i, r->level, &r->key, r->key.k.u64s);
+ end = vstruct_next(end);
+ }
+ }
+
+ mutex_unlock(&c->btree_root_lock);
+
+ return end;
+}
+
+void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
+{
+ if (c->btree_interior_update_worker)
+ destroy_workqueue(c->btree_interior_update_worker);
+ mempool_exit(&c->btree_interior_update_pool);
+}
+
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *c)
+{
+ mutex_init(&c->btree_reserve_cache_lock);
+ INIT_LIST_HEAD(&c->btree_interior_update_list);
+ INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
+ mutex_init(&c->btree_interior_update_lock);
+ INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
+
+ INIT_LIST_HEAD(&c->pending_node_rewrites);
+ mutex_init(&c->pending_node_rewrites_lock);
+}
+
+int bch2_fs_btree_interior_update_init(struct bch_fs *c)
+{
+ c->btree_interior_update_worker =
+ alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
+ if (!c->btree_interior_update_worker)
+ return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
+
+ if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
+ sizeof(struct btree_update)))
+ return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
+
+ return 0;
+}
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
new file mode 100644
index 000000000000..5e0a467fe905
--- /dev/null
+++ b/fs/bcachefs/btree_update_interior.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_UPDATE_INTERIOR_H
+#define _BCACHEFS_BTREE_UPDATE_INTERIOR_H
+
+#include "btree_cache.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+
+void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
+bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
+ struct bkey_format *);
+
+#define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
+
+#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
+
+/*
+ * Tracks an in progress split/rewrite of a btree node and the update to the
+ * parent node:
+ *
+ * When we split/rewrite a node, we do all the updates in memory without
+ * waiting for any writes to complete - we allocate the new node(s) and update
+ * the parent node, possibly recursively up to the root.
+ *
+ * The end result is that we have one or more new nodes being written -
+ * possibly several, if there were multiple splits - and then a write (updating
+ * an interior node) which will make all these new nodes visible.
+ *
+ * Additionally, as we split/rewrite nodes we free the old nodes - but the old
+ * nodes can't be freed (their space on disk can't be reclaimed) until the
+ * update to the interior node that makes the new node visible completes -
+ * until then, the old nodes are still reachable on disk.
+ */
+struct btree_update {
+ struct closure cl;
+ struct bch_fs *c;
+ u64 start_time;
+
+ struct list_head list;
+ struct list_head unwritten_list;
+
+ /* What kind of update are we doing? */
+ enum {
+ BTREE_INTERIOR_NO_UPDATE,
+ BTREE_INTERIOR_UPDATING_NODE,
+ BTREE_INTERIOR_UPDATING_ROOT,
+ BTREE_INTERIOR_UPDATING_AS,
+ } mode;
+
+ unsigned nodes_written:1;
+ unsigned took_gc_lock:1;
+
+ enum btree_id btree_id;
+ unsigned update_level;
+
+ struct disk_reservation disk_res;
+ struct journal_preres journal_preres;
+
+ /*
+ * BTREE_INTERIOR_UPDATING_NODE:
+ * The update that made the new nodes visible was a regular update to an
+ * existing interior node - @b. We can't write out the update to @b
+ * until the new nodes we created are finished writing, so we block @b
+ * from writing by putting this btree_interior update on the
+ * @b->write_blocked list with @write_blocked_list:
+ */
+ struct btree *b;
+ struct list_head write_blocked_list;
+
+ /*
+ * We may be freeing nodes that were dirty, and thus had journal entries
+ * pinned: we need to transfer the oldest of those pins to the
+ * btree_update operation, and release it when the new node(s)
+ * are all persistent and reachable:
+ */
+ struct journal_entry_pin journal;
+
+ /* Preallocated nodes we reserve when we start the update: */
+ struct prealloc_nodes {
+ struct btree *b[BTREE_UPDATE_NODES_MAX];
+ unsigned nr;
+ } prealloc_nodes[2];
+
+ /* Nodes being freed: */
+ struct keylist old_keys;
+ u64 _old_keys[BTREE_UPDATE_NODES_MAX *
+ BKEY_BTREE_PTR_U64s_MAX];
+
+ /* Nodes being added: */
+ struct keylist new_keys;
+ u64 _new_keys[BTREE_UPDATE_NODES_MAX *
+ BKEY_BTREE_PTR_U64s_MAX];
+
+ /* New nodes, that will be made reachable by this update: */
+ struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
+ unsigned nr_new_nodes;
+
+ struct btree *old_nodes[BTREE_UPDATE_NODES_MAX];
+ __le64 old_nodes_seq[BTREE_UPDATE_NODES_MAX];
+ unsigned nr_old_nodes;
+
+ open_bucket_idx_t open_buckets[BTREE_UPDATE_NODES_MAX *
+ BCH_REPLICAS_MAX];
+ open_bucket_idx_t nr_open_buckets;
+
+ unsigned journal_u64s;
+ u64 journal_entries[BTREE_UPDATE_JOURNAL_RES];
+
+ /* Only here to reduce stack usage on recursive splits: */
+ struct keylist parent_keys;
+ /*
+ * Enough room for btree_split's keys without realloc - btree node
+	 * pointers never have crc/compression info, so we only need to account
+ * for the pointers for three keys
+ */
+ u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
+};
+
+struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
+ struct btree_trans *,
+ struct btree *,
+ struct bkey_format);
+
+int bch2_btree_split_leaf(struct btree_trans *, struct btree_path *, unsigned);
+
+int __bch2_foreground_maybe_merge(struct btree_trans *, struct btree_path *,
+ unsigned, unsigned, enum btree_node_sibling);
+
+static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned level, unsigned flags,
+ enum btree_node_sibling sib)
+{
+ struct btree *b;
+
+ EBUG_ON(!btree_node_locked(path, level));
+
+ b = path->l[level].b;
+ if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
+ return 0;
+
+ return __bch2_foreground_maybe_merge(trans, path, level, flags, sib);
+}
+
+static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
+ struct btree_path *path,
+ unsigned level,
+ unsigned flags)
+{
+ return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+ btree_prev_sib) ?:
+ bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
+ btree_next_sib);
+}
+
+int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
+ struct btree *, unsigned);
+void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
+int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
+ struct btree *, struct bkey_i *,
+ unsigned, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
+ struct bkey_i *, unsigned, bool);
+
+void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
+void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
+
+static inline unsigned btree_update_reserve_required(struct bch_fs *c,
+ struct btree *b)
+{
+ unsigned depth = btree_node_root(c, b)->c.level + 1;
+
+ /*
+ * Number of nodes we might have to allocate in a worst case btree
+ * split operation - we split all the way up to the root, then allocate
+ * a new root, unless we're already at max depth:
+ */
+ if (depth < BTREE_MAX_DEPTH)
+ return (depth - b->c.level) * 2 + 1;
+ else
+ return (depth - b->c.level) * 2 - 1;
+}
+
+static inline void btree_node_reset_sib_u64s(struct btree *b)
+{
+ b->sib_u64s[0] = b->nr.live_u64s;
+ b->sib_u64s[1] = b->nr.live_u64s;
+}
+
+static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
+{
+ return (void *) b->data + btree_bytes(c);
+}
+
+static inline struct bkey_packed *unwritten_whiteouts_start(struct bch_fs *c,
+ struct btree *b)
+{
+ return (void *) ((u64 *) btree_data_end(c, b) - b->whiteout_u64s);
+}
+
+static inline struct bkey_packed *unwritten_whiteouts_end(struct bch_fs *c,
+ struct btree *b)
+{
+ return btree_data_end(c, b);
+}
+
+static inline void *write_block(struct btree *b)
+{
+ return (void *) b->data + (b->written << 9);
+}
+
+static inline bool __btree_addr_written(struct btree *b, void *p)
+{
+ return p < write_block(b);
+}
+
+static inline bool bset_written(struct btree *b, struct bset *i)
+{
+ return __btree_addr_written(b, i);
+}
+
+static inline bool bkey_written(struct btree *b, struct bkey_packed *k)
+{
+ return __btree_addr_written(b, k);
+}
+
+static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
+ struct btree *b,
+ void *end)
+{
+ ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
+ b->whiteout_u64s;
+ ssize_t total = c->opts.btree_node_size >> 3;
+
+ /* Always leave one extra u64 for bch2_varint_decode: */
+ used++;
+
+ return total - used;
+}
+
+static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
+ struct btree *b)
+{
+ ssize_t remaining = __bch_btree_u64s_remaining(c, b,
+ btree_bkey_last(b, bset_tree_last(b)));
+
+ BUG_ON(remaining < 0);
+
+ if (bset_written(b, btree_bset_last(b)))
+ return 0;
+
+ return remaining;
+}
+
+#define BTREE_WRITE_SET_U64s_BITS 9
+
+static inline unsigned btree_write_set_buffer(struct btree *b)
+{
+ /*
+ * Could buffer up larger amounts of keys for btrees with larger keys,
+ * pending benchmarking:
+ */
+ return 8 << BTREE_WRITE_SET_U64s_BITS;
+}
+
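+/*
+ * Decide whether to start a new bset: we must if the last bset has already
+ * been written (and a block's worth of space remains); otherwise we start one
+ * once the open bset grows past btree_write_set_buffer(), presumably to bound
+ * the cost of resorting it on each write:
+ */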
+static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
+ struct btree *b)
+{
+ struct bset_tree *t = bset_tree_last(b);
+ struct btree_node_entry *bne = max(write_block(b),
+ (void *) btree_bkey_last(b, bset_tree_last(b)));
+ ssize_t remaining_space =
+ __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+
+ if (unlikely(bset_written(b, bset(b, t)))) {
+ if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
+ return bne;
+ } else {
+ if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) &&
+ remaining_space > (ssize_t) (btree_write_set_buffer(b) >> 3))
+ return bne;
+ }
+
+ return NULL;
+}
+
+static inline void push_whiteout(struct bch_fs *c, struct btree *b,
+ struct bpos pos)
+{
+ struct bkey_packed k;
+
+ BUG_ON(bch_btree_keys_u64s_remaining(c, b) < BKEY_U64s);
+ EBUG_ON(btree_node_just_written(b));
+
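+	/* if the position can't be packed in the node's format, fall back to
+	 * an unpacked bkey: */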
+ if (!bkey_pack_pos(&k, pos, b)) {
+ struct bkey *u = (void *) &k;
+
+ bkey_init(u);
+ u->p = pos;
+ }
+
+ k.needs_whiteout = true;
+
+ b->whiteout_u64s += k.u64s;
+ bkey_copy(unwritten_whiteouts_start(c, b), &k);
+}
+
+/*
+ * write lock must be held on @b (else the dirty bset that we were going to
+ * insert into could be written out from under us)
+ */
+static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
+ struct btree *b, unsigned u64s)
+{
+ if (unlikely(btree_node_need_rewrite(b)))
+ return false;
+
+ return u64s <= bch_btree_keys_u64s_remaining(c, b);
+}
+
+void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
+
+bool bch2_btree_interior_updates_flush(struct bch_fs *);
+
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+ struct jset_entry *, struct jset_entry *);
+
+void bch2_do_pending_node_rewrites(struct bch_fs *);
+void bch2_free_pending_node_rewrites(struct bch_fs *);
+
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+void bch2_fs_btree_interior_update_init_early(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
new file mode 100644
index 000000000000..4e6241db518b
--- /dev/null
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_write_buffer.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+
+#include <linux/sort.h>
+
+static int btree_write_buffered_key_cmp(const void *_l, const void *_r)
+{
+ const struct btree_write_buffered_key *l = _l;
+ const struct btree_write_buffered_key *r = _r;
+
+ return cmp_int(l->btree, r->btree) ?:
+ bpos_cmp(l->k.k.p, r->k.k.p) ?:
+ cmp_int(l->journal_seq, r->journal_seq) ?:
+ cmp_int(l->journal_offset, r->journal_offset);
+}
+
+static int btree_write_buffered_journal_cmp(const void *_l, const void *_r)
+{
+ const struct btree_write_buffered_key *l = _l;
+ const struct btree_write_buffered_key *r = _r;
+
+ return cmp_int(l->journal_seq, r->journal_seq);
+}
+
+static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct btree_write_buffered_key *wb,
+ unsigned commit_flags,
+ bool *write_locked,
+ size_t *fast)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_path *path;
+ int ret;
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ path = iter->path;
+
+ if (!*write_locked) {
+ ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
+ if (ret)
+ return ret;
+
+ bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
+ *write_locked = true;
+ }
+
+ if (!bch2_btree_node_insert_fits(c, path->l[0].b, wb->k.k.u64s)) {
+ bch2_btree_node_unlock_write(trans, path, path->l[0].b);
+ *write_locked = false;
+ goto trans_commit;
+ }
+
+ bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
+ (*fast)++;
+
+ if (path->ref > 1) {
+ /*
+ * We can't clone a path that has write locks: if the path is
+ * shared, unlock before set_pos(), traverse():
+ */
+ bch2_btree_node_unlock_write(trans, path, path->l[0].b);
+ *write_locked = false;
+ }
+ return 0;
+trans_commit:
+ return bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ commit_flags|
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_JOURNAL_RECLAIM);
+}
+
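+/*
+ * Atomically flip the active buffer: snapshot the old state, zero the count
+ * and bump the buffer index with cmpxchg, then spin until readers of the old
+ * buffer (tracked by ref0/ref1) have drained:
+ */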
+static union btree_write_buffer_state btree_write_buffer_switch(struct btree_write_buffer *wb)
+{
+ union btree_write_buffer_state old, new;
+ u64 v = READ_ONCE(wb->state.v);
+
+ do {
+ old.v = new.v = v;
+
+ new.nr = 0;
+ new.idx++;
+ } while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);
+
+ while (old.idx == 0 ? wb->state.ref0 : wb->state.ref1)
+ cpu_relax();
+
+ smp_mb();
+
+ return old;
+}
+
+/*
+ * Update a btree with a write buffered key using the journal seq of the
+ * original write buffer insert.
+ *
+ * It is not safe to rejournal the key once it has been inserted into the write
+ * buffer because that may break recovery ordering. For example, the key may
+ * have already been modified in the active write buffer in a seq that comes
+ * before the current transaction. If we were to journal this key again and
+ * crash, recovery would process updates in the wrong order.
+ */
+static int
+btree_write_buffered_insert(struct btree_trans *trans,
+ struct btree_write_buffered_key *wb)
+{
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
+ BTREE_ITER_CACHED|BTREE_ITER_INTENT);
+
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
+ bool locked)
+{
+ struct bch_fs *c = trans->c;
+ struct journal *j = &c->journal;
+ struct btree_write_buffer *wb = &c->btree_write_buffer;
+ struct journal_entry_pin pin;
+ struct btree_write_buffered_key *i, *keys;
+ struct btree_iter iter = { NULL };
+ size_t nr = 0, skipped = 0, fast = 0, slowpath = 0;
+ bool write_locked = false;
+ union btree_write_buffer_state s;
+ int ret = 0;
+
+ memset(&pin, 0, sizeof(pin));
+
+ if (!locked && !mutex_trylock(&wb->flush_lock))
+ return 0;
+
+ bch2_journal_pin_copy(j, &pin, &wb->journal_pin, NULL);
+ bch2_journal_pin_drop(j, &wb->journal_pin);
+
+ s = btree_write_buffer_switch(wb);
+ keys = wb->keys[s.idx];
+ nr = s.nr;
+
+ if (race_fault())
+ goto slowpath;
+
+ /*
+ * We first sort so that we can detect and skip redundant updates, and
+ * then we attempt to flush in sorted btree order, as this is most
+ * efficient.
+ *
+ * However, since we're not flushing in the order they appear in the
+	 * journal, we won't be able to drop our journal pin until everything is
+ * flushed - which means this could deadlock the journal if we weren't
+ * passing BTREE_INSERT_JOURNAL_RECLAIM. This causes the update to fail
+ * if it would block taking a journal reservation.
+ *
+ * If that happens, simply skip the key so we can optimistically insert
+ * as many keys as possible in the fast path.
+ */
+ sort(keys, nr, sizeof(keys[0]),
+ btree_write_buffered_key_cmp, NULL);
+
+ for (i = keys; i < keys + nr; i++) {
+ if (i + 1 < keys + nr &&
+ i[0].btree == i[1].btree &&
+ bpos_eq(i[0].k.k.p, i[1].k.k.p)) {
+ skipped++;
+ i->journal_seq = 0;
+ continue;
+ }
+
+ if (write_locked &&
+ (iter.path->btree_id != i->btree ||
+ bpos_gt(i->k.k.p, iter.path->l[0].b->key.k.p))) {
+ bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
+ write_locked = false;
+ }
+
+ if (!iter.path || iter.path->btree_id != i->btree) {
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_iter_init(trans, &iter, i->btree, i->k.k.p,
+ BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
+ }
+
+ bch2_btree_iter_set_pos(&iter, i->k.k.p);
+ iter.path->preserve = false;
+
+ do {
+ ret = bch2_btree_write_buffer_flush_one(trans, &iter, i,
+ commit_flags, &write_locked, &fast);
+ if (!write_locked)
+ bch2_trans_begin(trans);
+ } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
+
+ if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
+ slowpath++;
+ continue;
+ }
+ if (ret)
+ break;
+
+ i->journal_seq = 0;
+ }
+
+ if (write_locked)
+ bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
+ bch2_trans_iter_exit(trans, &iter);
+
+ trace_write_buffer_flush(trans, nr, skipped, fast, wb->size);
+
+ if (slowpath)
+ goto slowpath;
+
+ bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret));
+out:
+ bch2_journal_pin_drop(j, &pin);
+ mutex_unlock(&wb->flush_lock);
+ return ret;
+slowpath:
+ trace_write_buffer_flush_slowpath(trans, i - keys, nr);
+
+ /*
+ * Now sort the rest by journal seq and bump the journal pin as we go.
+	 * The fast path zeroed the seq of keys that were successfully flushed,
+	 * so we can skip those here.
+ */
+ sort(keys, nr, sizeof(keys[0]),
+ btree_write_buffered_journal_cmp,
+ NULL);
+
+ commit_flags &= ~BCH_WATERMARK_MASK;
+ commit_flags |= BCH_WATERMARK_reclaim;
+
+ for (i = keys; i < keys + nr; i++) {
+ if (!i->journal_seq)
+ continue;
+
+ if (i->journal_seq > pin.seq) {
+ struct journal_entry_pin pin2;
+
+ memset(&pin2, 0, sizeof(pin2));
+
+ bch2_journal_pin_add(j, i->journal_seq, &pin2, NULL);
+ bch2_journal_pin_drop(j, &pin);
+ bch2_journal_pin_copy(j, &pin, &pin2, NULL);
+ bch2_journal_pin_drop(j, &pin2);
+ }
+
+ ret = commit_do(trans, NULL, NULL,
+ commit_flags|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_JOURNAL_RECLAIM,
+ btree_write_buffered_insert(trans, i));
+ if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
+ break;
+ }
+
+ goto out;
+}
+
+int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
+{
+ bch2_trans_unlock(trans);
+ mutex_lock(&trans->c->btree_write_buffer.flush_lock);
+ return __bch2_btree_write_buffer_flush(trans, 0, true);
+}
+
+int bch2_btree_write_buffer_flush(struct btree_trans *trans)
+{
+ return __bch2_btree_write_buffer_flush(trans, 0, false);
+}
+
+static int bch2_btree_write_buffer_journal_flush(struct journal *j,
+ struct journal_entry_pin *_pin, u64 seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct btree_write_buffer *wb = &c->btree_write_buffer;
+
+ mutex_lock(&wb->flush_lock);
+
+ return bch2_trans_run(c,
+ __bch2_btree_write_buffer_flush(trans, BTREE_INSERT_NOCHECK_RW, true));
+}
+
+static inline u64 btree_write_buffer_ref(int idx)
+{
+ return ((union btree_write_buffer_state) {
+ .ref0 = idx == 0,
+ .ref1 = idx == 1,
+ }).v;
+}
+
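+/*
+ * Append this transaction's buffered updates to the active write buffer: take
+ * a ref on the buffer (so a concurrent switch waits for us), reserve space
+ * with cmpxchg, copy the keys in, then drop the ref:
+ */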
+int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_write_buffer *wb = &c->btree_write_buffer;
+ struct btree_write_buffered_key *i;
+ union btree_write_buffer_state old, new;
+ int ret = 0;
+ u64 v;
+
+ trans_for_each_wb_update(trans, i) {
+ EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
+
+ i->journal_seq = trans->journal_res.seq;
+ i->journal_offset = trans->journal_res.offset;
+ }
+
+ preempt_disable();
+ v = READ_ONCE(wb->state.v);
+ do {
+ old.v = new.v = v;
+
+ new.v += btree_write_buffer_ref(new.idx);
+ new.nr += trans->nr_wb_updates;
+ if (new.nr > wb->size) {
+ ret = -BCH_ERR_btree_insert_need_flush_buffer;
+ goto out;
+ }
+ } while ((v = atomic64_cmpxchg_acquire(&wb->state.counter, old.v, new.v)) != old.v);
+
+ memcpy(wb->keys[new.idx] + old.nr,
+ trans->wb_updates,
+ sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);
+
+ bch2_journal_pin_add(&c->journal, trans->journal_res.seq, &wb->journal_pin,
+ bch2_btree_write_buffer_journal_flush);
+
+ atomic64_sub_return_release(btree_write_buffer_ref(new.idx), &wb->state.counter);
+out:
+ preempt_enable();
+ return ret;
+}
+
+void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
+{
+ struct btree_write_buffer *wb = &c->btree_write_buffer;
+
+ BUG_ON(wb->state.nr && !bch2_journal_error(&c->journal));
+
+ kvfree(wb->keys[1]);
+ kvfree(wb->keys[0]);
+}
+
+int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
+{
+ struct btree_write_buffer *wb = &c->btree_write_buffer;
+
+ mutex_init(&wb->flush_lock);
+ wb->size = c->opts.btree_write_buffer_size;
+
+ wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
+ wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
+ if (!wb->keys[0] || !wb->keys[1])
+ return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;
+
+ return 0;
+}
diff --git a/fs/bcachefs/btree_write_buffer.h b/fs/bcachefs/btree_write_buffer.h
new file mode 100644
index 000000000000..322df1c8304e
--- /dev/null
+++ b/fs/bcachefs/btree_write_buffer.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_H
+#define _BCACHEFS_BTREE_WRITE_BUFFER_H
+
+int __bch2_btree_write_buffer_flush(struct btree_trans *, unsigned, bool);
+int bch2_btree_write_buffer_flush_sync(struct btree_trans *);
+int bch2_btree_write_buffer_flush(struct btree_trans *);
+
+int bch2_btree_insert_keys_write_buffer(struct btree_trans *);
+
+void bch2_fs_btree_write_buffer_exit(struct bch_fs *);
+int bch2_fs_btree_write_buffer_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_H */
diff --git a/fs/bcachefs/btree_write_buffer_types.h b/fs/bcachefs/btree_write_buffer_types.h
new file mode 100644
index 000000000000..99993ba77aea
--- /dev/null
+++ b/fs/bcachefs/btree_write_buffer_types.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_WRITE_BUFFER_TYPES_H
+#define _BCACHEFS_BTREE_WRITE_BUFFER_TYPES_H
+
+#include "journal_types.h"
+
+#define BTREE_WRITE_BUFERED_VAL_U64s_MAX 4
+#define BTREE_WRITE_BUFERED_U64s_MAX (BKEY_U64s + BTREE_WRITE_BUFERED_VAL_U64s_MAX)
+
+struct btree_write_buffered_key {
+ u64 journal_seq;
+ unsigned journal_offset;
+ enum btree_id btree;
+ __BKEY_PADDED(k, BTREE_WRITE_BUFERED_VAL_U64s_MAX);
+};
+
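+/*
+ * Write buffer state, packed into a single u64 so it can be updated
+ * atomically: nr counts keys in the active buffer, idx selects which of the
+ * two buffers is active, and ref0/ref1 count in-flight writers to each
+ * buffer.
+ */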
+union btree_write_buffer_state {
+ struct {
+ atomic64_t counter;
+ };
+
+ struct {
+ u64 v;
+ };
+
+ struct {
+ u64 nr:23;
+ u64 idx:1;
+ u64 ref0:20;
+ u64 ref1:20;
+ };
+};
+
+struct btree_write_buffer {
+ struct mutex flush_lock;
+ struct journal_entry_pin journal_pin;
+
+ union btree_write_buffer_state state;
+ size_t size;
+
+ struct btree_write_buffered_key *keys[2];
+};
+
+#endif /* _BCACHEFS_BTREE_WRITE_BUFFER_TYPES_H */
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
new file mode 100644
index 000000000000..a1a4b5feadaa
--- /dev/null
+++ b/fs/bcachefs/buckets.c
@@ -0,0 +1,2106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for manipulating bucket marks for garbage collection.
+ *
+ * Copyright 2014 Datera, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "backpointers.h"
+#include "bset.h"
+#include "btree_gc.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
+#include "ec.h"
+#include "error.h"
+#include "inode.h"
+#include "movinggc.h"
+#include "recovery.h"
+#include "reflink.h"
+#include "replicas.h"
+#include "subvolume.h"
+#include "trace.h"
+
+#include <linux/preempt.h>
+
+static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
+ enum bch_data_type data_type,
+ s64 sectors)
+{
+ switch (data_type) {
+ case BCH_DATA_btree:
+ fs_usage->btree += sectors;
+ break;
+ case BCH_DATA_user:
+ case BCH_DATA_parity:
+ fs_usage->data += sectors;
+ break;
+ case BCH_DATA_cached:
+ fs_usage->cached += sectors;
+ break;
+ default:
+ break;
+ }
+}
+
+void bch2_fs_usage_initialize(struct bch_fs *c)
+{
+ struct bch_fs_usage *usage;
+ struct bch_dev *ca;
+ unsigned i;
+
+ percpu_down_write(&c->mark_lock);
+ usage = c->usage_base;
+
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ bch2_fs_usage_acc_to_base(c, i);
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ usage->reserved += usage->persistent_reserved[i];
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+
+ fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
+ }
+
+ for_each_member_device(ca, c, i) {
+ struct bch_dev_usage dev = bch2_dev_usage_read(ca);
+
+ usage->hidden += (dev.d[BCH_DATA_sb].buckets +
+ dev.d[BCH_DATA_journal].buckets) *
+ ca->mi.bucket_size;
+ }
+
+ percpu_up_write(&c->mark_lock);
+}
+
+static inline struct bch_dev_usage *dev_usage_ptr(struct bch_dev *ca,
+ unsigned journal_seq,
+ bool gc)
+{
+ BUG_ON(!gc && !journal_seq);
+
+ return this_cpu_ptr(gc
+ ? ca->usage_gc
+ : ca->usage[journal_seq & JOURNAL_BUF_MASK]);
+}
+
+void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
+{
+ struct bch_fs *c = ca->fs;
+ unsigned seq, i, u64s = dev_usage_u64s();
+
+ do {
+ seq = read_seqcount_begin(&c->usage_lock);
+ memcpy(usage, ca->usage_base, u64s * sizeof(u64));
+ for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
+ acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage[i], u64s);
+ } while (read_seqcount_retry(&c->usage_lock, seq));
+}
+
+u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
+{
+ ssize_t offset = v - (u64 *) c->usage_base;
+ unsigned i, seq;
+ u64 ret;
+
+ BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));
+ percpu_rwsem_assert_held(&c->mark_lock);
+
+ do {
+ seq = read_seqcount_begin(&c->usage_lock);
+ ret = *v;
+
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
+ } while (read_seqcount_retry(&c->usage_lock, seq));
+
+ return ret;
+}
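+
+/*
+ * Both readers above rely on the usual seqcount pattern: snapshot the
+ * base counters plus every percpu accumulator, then retry if
+ * bch2_fs_usage_acc_to_base() (the writer of usage_lock) ran
+ * concurrently.  This yields torn-free totals without taking mark_lock
+ * for write.
+ */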
+
+struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
+{
+ struct bch_fs_usage_online *ret;
+ unsigned nr_replicas = READ_ONCE(c->replicas.nr);
+ unsigned seq, i;
+retry:
+ ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_KERNEL);
+ if (unlikely(!ret))
+ return NULL;
+
+ percpu_down_read(&c->mark_lock);
+
+ if (nr_replicas != c->replicas.nr) {
+ nr_replicas = c->replicas.nr;
+ percpu_up_read(&c->mark_lock);
+ kfree(ret);
+ goto retry;
+ }
+
+ ret->online_reserved = percpu_u64_get(c->online_reserved);
+
+ do {
+ seq = read_seqcount_begin(&c->usage_lock);
+ unsafe_memcpy(&ret->u, c->usage_base,
+ __fs_usage_u64s(nr_replicas) * sizeof(u64),
+ "embedded variable length struct");
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
+ __fs_usage_u64s(nr_replicas));
+ } while (read_seqcount_retry(&c->usage_lock, seq));
+
+ return ret;
+}
+
+void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
+{
+ struct bch_dev *ca;
+ unsigned i, u64s = fs_usage_u64s(c);
+
+ BUG_ON(idx >= ARRAY_SIZE(c->usage));
+
+ preempt_disable();
+ write_seqcount_begin(&c->usage_lock);
+
+ acc_u64s_percpu((u64 *) c->usage_base,
+ (u64 __percpu *) c->usage[idx], u64s);
+ percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL) {
+ u64s = dev_usage_u64s();
+
+ acc_u64s_percpu((u64 *) ca->usage_base,
+ (u64 __percpu *) ca->usage[idx], u64s);
+ percpu_memset(ca->usage[idx], 0, u64s * sizeof(u64));
+ }
+ rcu_read_unlock();
+
+ write_seqcount_end(&c->usage_lock);
+ preempt_enable();
+}
+
+void bch2_fs_usage_to_text(struct printbuf *out,
+ struct bch_fs *c,
+ struct bch_fs_usage_online *fs_usage)
+{
+ unsigned i;
+
+ prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
+
+ prt_printf(out, "hidden:\t\t\t\t%llu\n",
+ fs_usage->u.hidden);
+ prt_printf(out, "data:\t\t\t\t%llu\n",
+ fs_usage->u.data);
+ prt_printf(out, "cached:\t\t\t\t%llu\n",
+ fs_usage->u.cached);
+ prt_printf(out, "reserved:\t\t\t%llu\n",
+ fs_usage->u.reserved);
+ prt_printf(out, "nr_inodes:\t\t\t%llu\n",
+ fs_usage->u.nr_inodes);
+ prt_printf(out, "online reserved:\t\t%llu\n",
+ fs_usage->online_reserved);
+
+ for (i = 0;
+ i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
+ i++) {
+ prt_printf(out, "%u replicas:\n", i + 1);
+ prt_printf(out, "\treserved:\t\t%llu\n",
+ fs_usage->u.persistent_reserved[i]);
+ }
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+
+ prt_printf(out, "\t");
+ bch2_replicas_entry_to_text(out, e);
+ prt_printf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
+ }
+}
+
+static u64 reserve_factor(u64 r)
+{
+ return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
+}
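+
+/*
+ * Worked example, assuming RESERVE_FACTOR is 6: reserve_factor(1000) =
+ * 1000 + (round_up(1000, 64) >> 6) = 1000 + (1024 >> 6) = 1016, i.e.
+ * reservations are padded by roughly 1/64th to leave slack in the
+ * accounting.
+ */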
+
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
+{
+ return min(fs_usage->u.hidden +
+ fs_usage->u.btree +
+ fs_usage->u.data +
+ reserve_factor(fs_usage->u.reserved +
+ fs_usage->online_reserved),
+ c->capacity);
+}
+
+static struct bch_fs_usage_short
+__bch2_fs_usage_read_short(struct bch_fs *c)
+{
+ struct bch_fs_usage_short ret;
+ u64 data, reserved;
+
+ ret.capacity = c->capacity -
+ bch2_fs_usage_read_one(c, &c->usage_base->hidden);
+
+ data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
+ bch2_fs_usage_read_one(c, &c->usage_base->btree);
+ reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
+ percpu_u64_get(c->online_reserved);
+
+ ret.used = min(ret.capacity, data + reserve_factor(reserved));
+ ret.free = ret.capacity - ret.used;
+
+ ret.nr_inodes = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
+
+ return ret;
+}
+
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *c)
+{
+ struct bch_fs_usage_short ret;
+
+ percpu_down_read(&c->mark_lock);
+ ret = __bch2_fs_usage_read_short(c);
+ percpu_up_read(&c->mark_lock);
+
+ return ret;
+}
+
+void bch2_dev_usage_init(struct bch_dev *ca)
+{
+ ca->usage_base->d[BCH_DATA_free].buckets = ca->mi.nbuckets - ca->mi.first_bucket;
+}
+
+static inline int bucket_sectors_fragmented(struct bch_dev *ca,
+ struct bch_alloc_v4 a)
+{
+ return a.dirty_sectors
+ ? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
+ : 0;
+}
+
+static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
+ struct bch_alloc_v4 old,
+ struct bch_alloc_v4 new,
+ u64 journal_seq, bool gc)
+{
+ struct bch_fs_usage *fs_usage;
+ struct bch_dev_usage *u;
+
+ preempt_disable();
+ fs_usage = fs_usage_ptr(c, journal_seq, gc);
+
+ if (data_type_is_hidden(old.data_type))
+ fs_usage->hidden -= ca->mi.bucket_size;
+ if (data_type_is_hidden(new.data_type))
+ fs_usage->hidden += ca->mi.bucket_size;
+
+ u = dev_usage_ptr(ca, journal_seq, gc);
+
+ u->d[old.data_type].buckets--;
+ u->d[new.data_type].buckets++;
+
+ u->buckets_ec -= (int) !!old.stripe;
+ u->buckets_ec += (int) !!new.stripe;
+
+ u->d[old.data_type].sectors -= old.dirty_sectors;
+ u->d[new.data_type].sectors += new.dirty_sectors;
+
+ u->d[BCH_DATA_cached].sectors += new.cached_sectors;
+ u->d[BCH_DATA_cached].sectors -= old.cached_sectors;
+
+ u->d[old.data_type].fragmented -= bucket_sectors_fragmented(ca, old);
+ u->d[new.data_type].fragmented += bucket_sectors_fragmented(ca, new);
+
+ preempt_enable();
+}
+
+static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
+ struct bucket old, struct bucket new,
+ u64 journal_seq, bool gc)
+{
+ struct bch_alloc_v4 old_a = {
+ .gen = old.gen,
+ .data_type = old.data_type,
+ .dirty_sectors = old.dirty_sectors,
+ .cached_sectors = old.cached_sectors,
+ .stripe = old.stripe,
+ };
+ struct bch_alloc_v4 new_a = {
+ .gen = new.gen,
+ .data_type = new.data_type,
+ .dirty_sectors = new.dirty_sectors,
+ .cached_sectors = new.cached_sectors,
+ .stripe = new.stripe,
+ };
+
+ bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+}
+
+static inline int __update_replicas(struct bch_fs *c,
+ struct bch_fs_usage *fs_usage,
+ struct bch_replicas_entry *r,
+ s64 sectors)
+{
+ int idx = bch2_replicas_entry_idx(c, r);
+
+ if (idx < 0)
+ return -1;
+
+ fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+ fs_usage->replicas[idx] += sectors;
+ return 0;
+}
+
+static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_replicas_entry *r, s64 sectors,
+ unsigned journal_seq, bool gc)
+{
+ struct bch_fs_usage *fs_usage;
+ int idx, ret = 0;
+ struct printbuf buf = PRINTBUF;
+
+ percpu_down_read(&c->mark_lock);
+
+ idx = bch2_replicas_entry_idx(c, r);
+ if (idx < 0 &&
+ fsck_err(c, "no replicas entry\n"
+ " while marking %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ percpu_up_read(&c->mark_lock);
+ ret = bch2_mark_replicas(c, r);
+ percpu_down_read(&c->mark_lock);
+
+ if (ret)
+ goto err;
+ idx = bch2_replicas_entry_idx(c, r);
+ }
+ if (idx < 0) {
+ ret = -1;
+ goto err;
+ }
+
+ preempt_disable();
+ fs_usage = fs_usage_ptr(c, journal_seq, gc);
+ fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+ fs_usage->replicas[idx] += sectors;
+ preempt_enable();
+err:
+fsck_err:
+ percpu_up_read(&c->mark_lock);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static inline int update_cached_sectors(struct bch_fs *c,
+ struct bkey_s_c k,
+ unsigned dev, s64 sectors,
+ unsigned journal_seq, bool gc)
+{
+ struct bch_replicas_padded r;
+
+ bch2_replicas_entry_cached(&r.e, dev);
+
+ return update_replicas(c, k, &r.e, sectors, journal_seq, gc);
+}
+
+static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
+ gfp_t gfp)
+{
+ struct replicas_delta_list *d = trans->fs_usage_deltas;
+ unsigned new_size = d ? (d->size + more) * 2 : 128;
+ unsigned alloc_size = sizeof(*d) + new_size;
+
+ WARN_ON_ONCE(alloc_size > REPLICAS_DELTA_LIST_MAX);
+
+ if (!d || d->used + more > d->size) {
+ d = krealloc(d, alloc_size, gfp|__GFP_ZERO);
+
+ if (unlikely(!d)) {
+ if (alloc_size > REPLICAS_DELTA_LIST_MAX)
+ return -ENOMEM;
+
+ d = mempool_alloc(&trans->c->replicas_delta_pool, gfp);
+ if (!d)
+ return -ENOMEM;
+
+ memset(d, 0, REPLICAS_DELTA_LIST_MAX);
+
+ if (trans->fs_usage_deltas)
+ memcpy(d, trans->fs_usage_deltas,
+ trans->fs_usage_deltas->size + sizeof(*d));
+
+ new_size = REPLICAS_DELTA_LIST_MAX - sizeof(*d);
+ kfree(trans->fs_usage_deltas);
+ }
+
+ d->size = new_size;
+ trans->fs_usage_deltas = d;
+ }
+
+ return 0;
+}
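+
+/*
+ * Growth policy of the delta list above: start at 128 bytes, double on
+ * each reallocation ((size + more) * 2); if krealloc() fails, fall back
+ * to a preallocated mempool buffer of REPLICAS_DELTA_LIST_MAX bytes so
+ * a transaction can still make progress under memory pressure.
+ */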
+
+int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
+{
+ return allocate_dropping_locks_errcode(trans,
+ __replicas_deltas_realloc(trans, more, _gfp));
+}
+
+static inline int update_replicas_list(struct btree_trans *trans,
+ struct bch_replicas_entry *r,
+ s64 sectors)
+{
+ struct replicas_delta_list *d;
+ struct replicas_delta *n;
+ unsigned b;
+ int ret;
+
+ if (!sectors)
+ return 0;
+
+ b = replicas_entry_bytes(r) + 8;
+ ret = bch2_replicas_deltas_realloc(trans, b);
+ if (ret)
+ return ret;
+
+ d = trans->fs_usage_deltas;
+ n = (void *) d->d + d->used;
+ n->delta = sectors;
+ unsafe_memcpy((void *) n + offsetof(struct replicas_delta, r),
+ r, replicas_entry_bytes(r),
+ "flexible array member embedded in strcuct with padding");
+ bch2_replicas_entry_sort(&n->r);
+ d->used += b;
+ return 0;
+}
+
+static inline int update_cached_sectors_list(struct btree_trans *trans,
+ unsigned dev, s64 sectors)
+{
+ struct bch_replicas_padded r;
+
+ bch2_replicas_entry_cached(&r.e, dev);
+
+ return update_replicas_list(trans, &r.e, sectors);
+}
+
+int bch2_mark_alloc(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ u64 bucket_journal_seq;
+ struct bch_fs *c = trans->c;
+ struct bch_alloc_v4 old_a_convert, new_a_convert;
+ const struct bch_alloc_v4 *old_a, *new_a;
+ struct bch_dev *ca;
+ int ret = 0;
+
+ /*
+ * alloc btree is read in by bch2_alloc_read, not gc:
+ */
+ if ((flags & BTREE_TRIGGER_GC) &&
+ !(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
+ return 0;
+
+ if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+ "alloc key for invalid device or bucket"))
+ return -EIO;
+
+ ca = bch_dev_bkey_exists(c, new.k->p.inode);
+
+ old_a = bch2_alloc_to_v4(old, &old_a_convert);
+ new_a = bch2_alloc_to_v4(new, &new_a_convert);
+
+ bucket_journal_seq = new_a->journal_seq;
+
+ if ((flags & BTREE_TRIGGER_INSERT) &&
+ data_type_is_empty(old_a->data_type) !=
+ data_type_is_empty(new_a->data_type) &&
+ new.k->type == KEY_TYPE_alloc_v4) {
+ struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
+
+ EBUG_ON(!journal_seq);
+
+ /*
+ * If the btree updates referring to a bucket weren't flushed
+ * before the bucket became empty again, then we don't have
+ * to wait on a journal flush before we can reuse the bucket:
+ */
+ v->journal_seq = bucket_journal_seq =
+ data_type_is_empty(new_a->data_type) &&
+ (journal_seq == v->journal_seq ||
+ bch2_journal_noflush_seq(&c->journal, v->journal_seq))
+ ? 0 : journal_seq;
+ }
+
+ if (!data_type_is_empty(old_a->data_type) &&
+ data_type_is_empty(new_a->data_type) &&
+ bucket_journal_seq) {
+ ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ c->journal.flushed_seq_ondisk,
+ new.k->p.inode, new.k->p.offset,
+ bucket_journal_seq);
+ if (ret) {
+ bch2_fs_fatal_error(c,
+ "error setting bucket_needs_journal_commit: %i", ret);
+ return ret;
+ }
+ }
+
+ percpu_down_read(&c->mark_lock);
+ if (!gc && new_a->gen != old_a->gen)
+ *bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+ bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
+
+ if (gc) {
+ struct bucket *g = gc_bucket(ca, new.k->p.offset);
+
+ bucket_lock(g);
+
+ g->gen_valid = 1;
+ g->gen = new_a->gen;
+ g->data_type = new_a->data_type;
+ g->stripe = new_a->stripe;
+ g->stripe_redundancy = new_a->stripe_redundancy;
+ g->dirty_sectors = new_a->dirty_sectors;
+ g->cached_sectors = new_a->cached_sectors;
+
+ bucket_unlock(g);
+ }
+ percpu_up_read(&c->mark_lock);
+
+ /*
+ * need to know if we're getting called from the invalidate path or
+ * not:
+ */
+
+ if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+ old_a->cached_sectors) {
+ ret = update_cached_sectors(c, new, ca->dev_idx,
+ -((s64) old_a->cached_sectors),
+ journal_seq, gc);
+ if (ret) {
+ bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (new_a->data_type == BCH_DATA_free &&
+ (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
+ closure_wake_up(&c->freelist_wait);
+
+ if (new_a->data_type == BCH_DATA_need_discard &&
+ (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
+ bch2_do_discards(c);
+
+ if (old_a->data_type != BCH_DATA_cached &&
+ new_a->data_type == BCH_DATA_cached &&
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
+ bch2_do_invalidates(c);
+
+ if (new_a->data_type == BCH_DATA_need_gc_gens)
+ bch2_do_gc_gens(c);
+
+ return 0;
+}
+
+int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type data_type,
+ unsigned sectors, struct gc_pos pos,
+ unsigned flags)
+{
+ struct bucket old, new, *g;
+ int ret = 0;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+ BUG_ON(data_type != BCH_DATA_sb &&
+ data_type != BCH_DATA_journal);
+
+ /*
+ * Backup superblock might be past the end of our normal usable space:
+ */
+ if (b >= ca->mi.nbuckets)
+ return 0;
+
+ percpu_down_read(&c->mark_lock);
+ g = gc_bucket(ca, b);
+
+ bucket_lock(g);
+ old = *g;
+
+ if (bch2_fs_inconsistent_on(g->data_type &&
+ g->data_type != data_type, c,
+ "different types of data in same bucket: %s, %s",
+ bch2_data_types[g->data_type],
+ bch2_data_types[data_type])) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
+ ca->dev_idx, b, g->gen,
+ bch2_data_types[g->data_type ?: data_type],
+ g->dirty_sectors, sectors)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ g->data_type = data_type;
+ g->dirty_sectors += sectors;
+ new = *g;
+err:
+ bucket_unlock(g);
+ if (!ret)
+ bch2_dev_usage_update_m(c, ca, old, new, 0, true);
+ percpu_up_read(&c->mark_lock);
+ return ret;
+}
+
+static int check_bucket_ref(struct btree_trans *trans,
+ struct bkey_s_c k,
+ const struct bch_extent_ptr *ptr,
+ s64 sectors, enum bch_data_type ptr_data_type,
+ u8 b_gen, u8 bucket_data_type,
+ u32 dirty_sectors, u32 cached_sectors)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
+ u32 bucket_sectors = !ptr->cached
+ ? dirty_sectors
+ : cached_sectors;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (bucket_data_type == BCH_DATA_cached)
+ bucket_data_type = BCH_DATA_user;
+
+ if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
+ (bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
+ bucket_data_type = ptr_data_type = BCH_DATA_stripe;
+
+ if (gen_after(ptr->gen, b_gen)) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
+ "while marking %s",
+ ptr->dev, bucket_nr, b_gen,
+ bch2_data_types[bucket_data_type ?: ptr_data_type],
+ ptr->gen,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
+ }
+
+ if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ ptr->dev, bucket_nr, b_gen,
+ bch2_data_types[bucket_data_type ?: ptr_data_type],
+ ptr->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
+ }
+
+ if (b_gen != ptr->gen && !ptr->cached) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
+ "while marking %s",
+ ptr->dev, bucket_nr, b_gen,
+ *bucket_gen(ca, bucket_nr),
+ bch2_data_types[bucket_data_type ?: ptr_data_type],
+ ptr->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
+ }
+
+ if (b_gen != ptr->gen) {
+ ret = 1;
+ goto out;
+ }
+
+ if (!data_type_is_empty(bucket_data_type) &&
+ ptr_data_type &&
+ bucket_data_type != ptr_data_type) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ ptr->dev, bucket_nr, b_gen,
+ bch2_data_types[bucket_data_type],
+ bch2_data_types[ptr_data_type],
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
+ }
+
+ if ((u64) bucket_sectors + sectors > U32_MAX) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
+ "while marking %s",
+ ptr->dev, bucket_nr, b_gen,
+ bch2_data_types[bucket_data_type ?: ptr_data_type],
+ bucket_sectors, sectors,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EIO;
+ goto err;
+ }
+out:
+ printbuf_exit(&buf);
+ return ret;
+err:
+ bch2_dump_trans_updates(trans);
+ goto out;
+}
+
+static int mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c k,
+ unsigned ptr_idx,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ u64 journal_seq = trans->journal_res.seq;
+ const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+ unsigned nr_data = s->nr_blocks - s->nr_redundant;
+ bool parity = ptr_idx >= nr_data;
+ enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
+ s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
+ const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket old, new, *g;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ /* XXX doesn't handle deletion */
+
+ percpu_down_read(&c->mark_lock);
+ g = PTR_GC_BUCKET(ca, ptr);
+
+ if (g->dirty_sectors ||
+ (g->stripe && g->stripe != k.k->p.offset)) {
+ bch2_fs_inconsistent(c,
+ "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
+ ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ bucket_lock(g);
+ old = *g;
+
+ ret = check_bucket_ref(trans, k, ptr, sectors, data_type,
+ g->gen, g->data_type,
+ g->dirty_sectors, g->cached_sectors);
+ if (ret)
+ goto err;
+
+ g->data_type = data_type;
+ g->dirty_sectors += sectors;
+
+ g->stripe = k.k->p.offset;
+ g->stripe_redundancy = s->nr_redundant;
+ new = *g;
+err:
+ bucket_unlock(g);
+ if (!ret)
+ bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
+ percpu_up_read(&c->mark_lock);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int __mark_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
+ const struct bch_extent_ptr *ptr,
+ s64 sectors, enum bch_data_type ptr_data_type,
+ u8 bucket_gen, u8 *bucket_data_type,
+ u32 *dirty_sectors, u32 *cached_sectors)
+{
+ u32 *dst_sectors = !ptr->cached
+ ? dirty_sectors
+ : cached_sectors;
+ int ret = check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
+ bucket_gen, *bucket_data_type,
+ *dirty_sectors, *cached_sectors);
+
+ if (ret)
+ return ret;
+
+ *dst_sectors += sectors;
+ *bucket_data_type = *dirty_sectors || *cached_sectors
+ ? ptr_data_type : 0;
+ return 0;
+}
+
+static int bch2_mark_pointer(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ s64 sectors,
+ unsigned flags)
+{
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bucket old, new, *g;
+ enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
+ u8 bucket_data_type;
+ int ret = 0;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ percpu_down_read(&c->mark_lock);
+ g = PTR_GC_BUCKET(ca, &p.ptr);
+ bucket_lock(g);
+ old = *g;
+
+ bucket_data_type = g->data_type;
+ ret = __mark_pointer(trans, k, &p.ptr, sectors,
+ data_type, g->gen,
+ &bucket_data_type,
+ &g->dirty_sectors,
+ &g->cached_sectors);
+ if (!ret)
+ g->data_type = bucket_data_type;
+
+ new = *g;
+ bucket_unlock(g);
+ if (!ret)
+ bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
+ percpu_up_read(&c->mark_lock);
+
+ return ret;
+}
+
+static int bch2_mark_stripe_ptr(struct btree_trans *trans,
+ struct bkey_s_c k,
+ struct bch_extent_stripe_ptr p,
+ enum bch_data_type data_type,
+ s64 sectors,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_replicas_padded r;
+ struct gc_stripe *m;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
+ if (!m) {
+ bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+ (u64) p.idx);
+ return -BCH_ERR_ENOMEM_mark_stripe_ptr;
+ }
+
+ mutex_lock(&c->ec_stripes_heap_lock);
+
+ if (!m || !m->alive) {
+ mutex_unlock(&c->ec_stripes_heap_lock);
+ bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
+ (u64) p.idx);
+ bch2_inconsistent_error(c);
+ return -EIO;
+ }
+
+ m->block_sectors[p.block] += sectors;
+
+ r = m->r;
+ mutex_unlock(&c->ec_stripes_heap_lock);
+
+ r.e.data_type = data_type;
+ update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
+
+ return 0;
+}
+
+int bch2_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bch_replicas_padded r;
+ enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
+ ? BCH_DATA_btree
+ : BCH_DATA_user;
+ s64 sectors = bkey_is_btree_ptr(k.k)
+ ? btree_sectors(c)
+ : k.k->size;
+ s64 dirty_sectors = 0;
+ bool stale;
+ int ret;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ r.e.data_type = data_type;
+ r.e.nr_devs = 0;
+ r.e.nr_required = 1;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ s64 disk_sectors = ptr_disk_sectors(sectors, p);
+
+ if (flags & BTREE_TRIGGER_OVERWRITE)
+ disk_sectors = -disk_sectors;
+
+ ret = bch2_mark_pointer(trans, btree_id, level, k, p, disk_sectors, flags);
+ if (ret < 0)
+ return ret;
+
+ stale = ret > 0;
+
+ if (p.ptr.cached) {
+ if (!stale) {
+ ret = update_cached_sectors(c, k, p.ptr.dev,
+ disk_sectors, journal_seq, true);
+ if (ret) {
+ bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
+ __func__);
+ return ret;
+ }
+ }
+ } else if (!p.has_ec) {
+ dirty_sectors += disk_sectors;
+ r.e.devs[r.e.nr_devs++] = p.ptr.dev;
+ } else {
+ ret = bch2_mark_stripe_ptr(trans, k, p.ec, data_type,
+ disk_sectors, flags);
+ if (ret)
+ return ret;
+
+ /*
+ * There may be other dirty pointers in this extent, but
+ * they're not required for mounting, since we have an
+ * erasure coded pointer in this extent:
+ */
+ r.e.nr_required = 0;
+ }
+ }
+
+ if (r.e.nr_devs) {
+ ret = update_replicas(c, k, &r.e, dirty_sectors, journal_seq, true);
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_fs_fatal_error(c, "%s(): no replicas entry for %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int bch2_mark_stripe(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ bool gc = flags & BTREE_TRIGGER_GC;
+ u64 journal_seq = trans->journal_res.seq;
+ struct bch_fs *c = trans->c;
+ u64 idx = new.k->p.offset;
+ const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
+ ? bkey_s_c_to_stripe(old).v : NULL;
+ const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
+ ? bkey_s_c_to_stripe(new).v : NULL;
+ unsigned i;
+ int ret;
+
+ BUG_ON(gc && old_s);
+
+ if (!gc) {
+ struct stripe *m = genradix_ptr(&c->stripes, idx);
+
+ if (!m) {
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf1, c, old);
+ bch2_bkey_val_to_text(&buf2, c, new);
+ bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
+ "old %s\n"
+ "new %s", idx, buf1.buf, buf2.buf);
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ bch2_inconsistent_error(c);
+ return -1;
+ }
+
+ if (!new_s) {
+ bch2_stripes_heap_del(c, m, idx);
+
+ memset(m, 0, sizeof(*m));
+ } else {
+ m->sectors = le16_to_cpu(new_s->sectors);
+ m->algorithm = new_s->algorithm;
+ m->nr_blocks = new_s->nr_blocks;
+ m->nr_redundant = new_s->nr_redundant;
+ m->blocks_nonempty = 0;
+
+ for (i = 0; i < new_s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
+
+ if (!old_s)
+ bch2_stripes_heap_insert(c, m, idx);
+ else
+ bch2_stripes_heap_update(c, m, idx);
+ }
+ } else {
+ struct gc_stripe *m =
+ genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
+
+ if (!m) {
+ bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+ idx);
+ return -BCH_ERR_ENOMEM_mark_stripe;
+ }
+ /*
+ * This will be wrong when we bring back runtime gc: we should
+ * be unmarking the old key and then marking the new key
+ */
+ m->alive = true;
+ m->sectors = le16_to_cpu(new_s->sectors);
+ m->nr_blocks = new_s->nr_blocks;
+ m->nr_redundant = new_s->nr_redundant;
+
+ for (i = 0; i < new_s->nr_blocks; i++)
+ m->ptrs[i] = new_s->ptrs[i];
+
+ bch2_bkey_to_replicas(&m->r.e, new);
+
+ /*
+ * gc recalculates this field from stripe ptr
+ * references:
+ */
+ memset(m->block_sectors, 0, sizeof(m->block_sectors));
+
+ for (i = 0; i < new_s->nr_blocks; i++) {
+ ret = mark_stripe_bucket(trans, new, i, flags);
+ if (ret)
+ return ret;
+ }
+
+ ret = update_replicas(c, new, &m->r.e,
+ ((s64) m->sectors * m->nr_redundant),
+ journal_seq, gc);
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, new);
+ bch2_fs_fatal_error(c, "no replicas entry for %s", buf.buf);
+ printbuf_exit(&buf);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int bch2_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
+ struct bch_fs_usage *fs_usage;
+ unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
+ s64 sectors = (s64) k.k->size;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ if (flags & BTREE_TRIGGER_OVERWRITE)
+ sectors = -sectors;
+ sectors *= replicas;
+
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+
+ fs_usage = fs_usage_ptr(c, trans->journal_res.seq, flags & BTREE_TRIGGER_GC);
+ replicas = clamp_t(unsigned, replicas, 1,
+ ARRAY_SIZE(fs_usage->persistent_reserved));
+
+ fs_usage->reserved += sectors;
+ fs_usage->persistent_reserved[replicas - 1] += sectors;
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+
+ return 0;
+}
+
+static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c_reflink_p p,
+ u64 start, u64 end,
+ u64 *idx, unsigned flags, size_t r_idx)
+{
+ struct bch_fs *c = trans->c;
+ struct reflink_gc *r;
+ int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+ u64 next_idx = end;
+ s64 ret = 0;
+ struct printbuf buf = PRINTBUF;
+
+ if (r_idx >= c->reflink_gc_nr)
+ goto not_found;
+
+ r = genradix_ptr(&c->reflink_gc_table, r_idx);
+ next_idx = min(next_idx, r->offset - r->size);
+ if (*idx < next_idx)
+ goto not_found;
+
+ BUG_ON((s64) r->refcount + add < 0);
+
+ r->refcount += add;
+ *idx = r->offset;
+ return 0;
+not_found:
+ if (fsck_err(c, "pointer to missing indirect extent\n"
+ " %s\n"
+ " missing range %llu-%llu",
+ (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
+ *idx, next_idx)) {
+ struct bkey_i_error *new;
+
+ new = bch2_trans_kmalloc(trans, sizeof(*new));
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ goto err;
+
+ bkey_init(&new->k);
+ new->k.type = KEY_TYPE_error;
+ new->k.p = bkey_start_pos(p.k);
+ new->k.p.offset += *idx - start;
+ bch2_key_resize(&new->k, next_idx - *idx);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i,
+ BTREE_TRIGGER_NORUN);
+ }
+
+ *idx = next_idx;
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
+ struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
+ struct reflink_gc *ref;
+ size_t l, r, m;
+ u64 idx = le64_to_cpu(p.v->idx), start = idx;
+ u64 end = le64_to_cpu(p.v->idx) + p.k->size;
+ int ret = 0;
+
+ BUG_ON(!(flags & BTREE_TRIGGER_GC));
+
+ if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_reflink_p_fix) {
+ idx -= le32_to_cpu(p.v->front_pad);
+ end += le32_to_cpu(p.v->back_pad);
+ }
+
+ l = 0;
+ r = c->reflink_gc_nr;
+ while (l < r) {
+ m = l + (r - l) / 2;
+
+ ref = genradix_ptr(&c->reflink_gc_table, m);
+ if (ref->offset <= idx)
+ l = m + 1;
+ else
+ r = m;
+ }
+
+ while (idx < end && !ret)
+ ret = __bch2_mark_reflink_p(trans, p, start, end,
+ &idx, flags, l++);
+
+ return ret;
+}
+
+void bch2_trans_fs_usage_revert(struct btree_trans *trans,
+ struct replicas_delta_list *deltas)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_fs_usage *dst;
+ struct replicas_delta *d, *top = (void *) deltas->d + deltas->used;
+ s64 added = 0;
+ unsigned i;
+
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+ dst = fs_usage_ptr(c, trans->journal_res.seq, false);
+
+ /* revert changes: */
+ for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
+ switch (d->r.data_type) {
+ case BCH_DATA_btree:
+ case BCH_DATA_user:
+ case BCH_DATA_parity:
+ added += d->delta;
+ }
+ BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
+ }
+
+ dst->nr_inodes -= deltas->nr_inodes;
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ added -= deltas->persistent_reserved[i];
+ dst->reserved -= deltas->persistent_reserved[i];
+ dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
+ }
+
+ if (added > 0) {
+ trans->disk_res->sectors += added;
+ this_cpu_add(*c->online_reserved, added);
+ }
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+}
+
+int bch2_trans_fs_usage_apply(struct btree_trans *trans,
+ struct replicas_delta_list *deltas)
+{
+ struct bch_fs *c = trans->c;
+ static int warned_disk_usage = 0;
+ bool warn = false;
+ unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
+ struct replicas_delta *d, *d2;
+ struct replicas_delta *top = (void *) deltas->d + deltas->used;
+ struct bch_fs_usage *dst;
+ s64 added = 0, should_not_have_added;
+ unsigned i;
+
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+ dst = fs_usage_ptr(c, trans->journal_res.seq, false);
+
+ for (d = deltas->d; d != top; d = replicas_delta_next(d)) {
+ switch (d->r.data_type) {
+ case BCH_DATA_btree:
+ case BCH_DATA_user:
+ case BCH_DATA_parity:
+ added += d->delta;
+ }
+
+ if (__update_replicas(c, dst, &d->r, d->delta))
+ goto need_mark;
+ }
+
+ dst->nr_inodes += deltas->nr_inodes;
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ added += deltas->persistent_reserved[i];
+ dst->reserved += deltas->persistent_reserved[i];
+ dst->persistent_reserved[i] += deltas->persistent_reserved[i];
+ }
+
+ /*
+ * Not allowed to reduce sectors_available except by getting a
+ * reservation:
+ */
+ should_not_have_added = added - (s64) disk_res_sectors;
+ if (unlikely(should_not_have_added > 0)) {
+ u64 old, new, v = atomic64_read(&c->sectors_available);
+
+ do {
+ old = v;
+ new = max_t(s64, 0, old - should_not_have_added);
+ } while ((v = atomic64_cmpxchg(&c->sectors_available,
+ old, new)) != old);
+
+ added -= should_not_have_added;
+ warn = true;
+ }
+
+ if (added > 0) {
+ trans->disk_res->sectors -= added;
+ this_cpu_sub(*c->online_reserved, added);
+ }
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+
+ if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
+ bch2_trans_inconsistent(trans,
+ "disk usage increased %lli more than %u sectors reserved)",
+ should_not_have_added, disk_res_sectors);
+ return 0;
+need_mark:
+ /* revert changes: */
+ for (d2 = deltas->d; d2 != d; d2 = replicas_delta_next(d2))
+ BUG_ON(__update_replicas(c, dst, &d2->r, -d2->delta));
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+ return -1;
+}
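+
+/*
+ * Note on the clamp above: if a commit consumed more sectors than its
+ * disk reservation covered, the excess is subtracted back out of
+ * c->sectors_available (clamped at zero) and reported via
+ * bch2_trans_inconsistent(), rather than silently skewing the free
+ * space accounting.
+ */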
+
+/* trans_mark: */
+
+static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c k, struct extent_ptr_decoded p,
+ unsigned flags)
+{
+ bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a;
+ struct bpos bucket;
+ struct bch_backpointer bp;
+ s64 sectors;
+ int ret;
+
+ bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
+ sectors = bp.bucket_len;
+ if (!insert)
+ sectors = -sectors;
+
+ a = bch2_trans_start_alloc_update(trans, &iter, bucket);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
+ a->v.gen, &a->v.data_type,
+ &a->v.dirty_sectors, &a->v.cached_sectors) ?:
+ bch2_trans_update(trans, &iter, &a->k_i, 0);
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ return ret;
+
+ if (!p.ptr.cached) {
+ ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
+ struct extent_ptr_decoded p,
+ s64 sectors, enum bch_data_type data_type)
+{
+ struct btree_iter iter;
+ struct bkey_i_stripe *s;
+ struct bch_replicas_padded r;
+ int ret = 0;
+
+ s = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_stripes, POS(0, p.ec.idx),
+ BTREE_ITER_WITH_UPDATES, stripe);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
+ "pointer to nonexistent stripe %llu",
+ (u64) p.ec.idx);
+ goto err;
+ }
+
+ if (!bch2_ptr_matches_stripe(&s->v, p)) {
+ bch2_trans_inconsistent(trans,
+ "stripe pointer doesn't match stripe %llu",
+ (u64) p.ec.idx);
+ ret = -EIO;
+ goto err;
+ }
+
+ stripe_blockcount_set(&s->v, p.ec.block,
+ stripe_blockcount_get(&s->v, p.ec.block) +
+ sectors);
+
+ bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(&s->k_i));
+ r.e.data_type = data_type;
+ ret = update_replicas_list(trans, &r.e, sectors);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_trans_mark_extent(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bch_replicas_padded r;
+ enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
+ ? BCH_DATA_btree
+ : BCH_DATA_user;
+ s64 sectors = bkey_is_btree_ptr(k.k)
+ ? btree_sectors(c)
+ : k.k->size;
+ s64 dirty_sectors = 0;
+ bool stale;
+ int ret = 0;
+
+ r.e.data_type = data_type;
+ r.e.nr_devs = 0;
+ r.e.nr_required = 1;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ s64 disk_sectors = ptr_disk_sectors(sectors, p);
+
+ if (flags & BTREE_TRIGGER_OVERWRITE)
+ disk_sectors = -disk_sectors;
+
+ ret = bch2_trans_mark_pointer(trans, btree_id, level, k, p, flags);
+ if (ret < 0)
+ return ret;
+
+ stale = ret > 0;
+
+ if (p.ptr.cached) {
+ if (!stale) {
+ ret = update_cached_sectors_list(trans, p.ptr.dev,
+ disk_sectors);
+ if (ret)
+ return ret;
+ }
+ } else if (!p.has_ec) {
+ dirty_sectors += disk_sectors;
+ r.e.devs[r.e.nr_devs++] = p.ptr.dev;
+ } else {
+ ret = bch2_trans_mark_stripe_ptr(trans, p,
+ disk_sectors, data_type);
+ if (ret)
+ return ret;
+
+ r.e.nr_required = 0;
+ }
+ }
+
+ if (r.e.nr_devs)
+ ret = update_replicas_list(trans, &r.e, dirty_sectors);
+
+ return ret;
+}
+
+static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
+ struct bkey_s_c_stripe s,
+ unsigned idx, bool deleting)
+{
+ struct bch_fs *c = trans->c;
+ const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a;
+ enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
+ ? BCH_DATA_parity : 0;
+ s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
+ int ret = 0;
+
+ if (deleting)
+ sectors = -sectors;
+
+ a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ ret = check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
+ a->v.gen, a->v.data_type,
+ a->v.dirty_sectors, a->v.cached_sectors);
+ if (ret)
+ goto err;
+
+ if (!deleting) {
+ if (bch2_trans_inconsistent_on(a->v.stripe ||
+ a->v.stripe_redundancy, trans,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_types[a->v.data_type],
+ a->v.dirty_sectors,
+ a->v.stripe, s.k->p.offset)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_types[a->v.data_type],
+ a->v.dirty_sectors,
+ s.k->p.offset)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ a->v.stripe = s.k->p.offset;
+ a->v.stripe_redundancy = s.v->nr_redundant;
+ a->v.data_type = BCH_DATA_stripe;
+ } else {
+ if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
+ a->v.stripe_redundancy != s.v->nr_redundant, trans,
+ "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ s.k->p.offset, a->v.stripe)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ a->v.stripe = 0;
+ a->v.stripe_redundancy = 0;
+ a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
+ }
+
+ a->v.dirty_sectors += sectors;
+ if (data_type)
+ a->v.data_type = !deleting ? data_type : 0;
+
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_trans_mark_stripe(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ const struct bch_stripe *old_s = NULL;
+ struct bch_stripe *new_s = NULL;
+ struct bch_replicas_padded r;
+ unsigned i, nr_blocks;
+ int ret = 0;
+
+ if (old.k->type == KEY_TYPE_stripe)
+ old_s = bkey_s_c_to_stripe(old).v;
+ if (new->k.type == KEY_TYPE_stripe)
+ new_s = &bkey_i_to_stripe(new)->v;
+
+ /*
+ * If the pointers aren't changing, we don't need to do anything:
+ */
+ if (new_s && old_s &&
+ new_s->nr_blocks == old_s->nr_blocks &&
+ new_s->nr_redundant == old_s->nr_redundant &&
+ !memcmp(old_s->ptrs, new_s->ptrs,
+ new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
+ return 0;
+
+ BUG_ON(new_s && old_s &&
+ (new_s->nr_blocks != old_s->nr_blocks ||
+ new_s->nr_redundant != old_s->nr_redundant));
+
+ nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
+
+ if (new_s) {
+ s64 sectors = le16_to_cpu(new_s->sectors);
+
+ bch2_bkey_to_replicas(&r.e, bkey_i_to_s_c(new));
+ ret = update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
+ if (ret)
+ return ret;
+ }
+
+ if (old_s) {
+ s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
+
+ bch2_bkey_to_replicas(&r.e, old);
+ ret = update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < nr_blocks; i++) {
+ if (new_s && old_s &&
+ !memcmp(&new_s->ptrs[i],
+ &old_s->ptrs[i],
+ sizeof(new_s->ptrs[i])))
+ continue;
+
+ if (new_s) {
+ ret = bch2_trans_mark_stripe_bucket(trans,
+ bkey_i_to_s_c_stripe(new), i, false);
+ if (ret)
+ break;
+ }
+
+ if (old_s) {
+ ret = bch2_trans_mark_stripe_bucket(trans,
+ bkey_s_c_to_stripe(old), i, true);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
+ unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
+ s64 sectors = (s64) k.k->size;
+ struct replicas_delta_list *d;
+ int ret;
+
+ if (flags & BTREE_TRIGGER_OVERWRITE)
+ sectors = -sectors;
+ sectors *= replicas;
+
+ ret = bch2_replicas_deltas_realloc(trans, 0);
+ if (ret)
+ return ret;
+
+ d = trans->fs_usage_deltas;
+ replicas = clamp_t(unsigned, replicas, 1,
+ ARRAY_SIZE(d->persistent_reserved));
+
+ d->persistent_reserved[replicas - 1] += sectors;
+ return 0;
+}
+
+static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
+ struct bkey_s_c_reflink_p p,
+ u64 *idx, unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_i *k;
+ __le64 *refcount;
+ int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ k = bch2_bkey_get_mut_noupdate(trans, &iter,
+ BTREE_ID_reflink, POS(0, *idx),
+ BTREE_ITER_WITH_UPDATES);
+ ret = PTR_ERR_OR_ZERO(k);
+ if (ret)
+ goto err;
+
+ refcount = bkey_refcount(k);
+ if (!refcount) {
+ bch2_bkey_val_to_text(&buf, c, p.s_c);
+ bch2_trans_inconsistent(trans,
+ "nonexistent indirect extent at %llu while marking\n %s",
+ *idx, buf.buf);
+ ret = -EIO;
+ goto err;
+ }
+
+ if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
+ bch2_bkey_val_to_text(&buf, c, p.s_c);
+ bch2_trans_inconsistent(trans,
+ "indirect extent refcount underflow at %llu while marking\n %s",
+ *idx, buf.buf);
+ ret = -EIO;
+ goto err;
+ }
+
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
+ u64 pad;
+
+ pad = max_t(s64, le32_to_cpu(v->front_pad),
+ le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
+ BUG_ON(pad > U32_MAX);
+ v->front_pad = cpu_to_le32(pad);
+
+ pad = max_t(s64, le32_to_cpu(v->back_pad),
+ k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
+ BUG_ON(pad > U32_MAX);
+ v->back_pad = cpu_to_le32(pad);
+ }
+
+ le64_add_cpu(refcount, add);
+
+ bch2_btree_iter_set_pos_to_extent_start(&iter);
+ ret = bch2_trans_update(trans, &iter, k, 0);
+ if (ret)
+ goto err;
+
+ *idx = k->k.p.offset;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
+ ? old
+ : bkey_i_to_s_c(new);
+ struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
+ u64 idx, end_idx;
+ int ret = 0;
+
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
+
+ v->front_pad = v->back_pad = 0;
+ }
+
+ idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
+ end_idx = le64_to_cpu(p.v->idx) + p.k->size +
+ le32_to_cpu(p.v->back_pad);
+
+ while (idx < end_idx && !ret)
+ ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
+
+ return ret;
+}
+
+static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
+ struct bch_dev *ca, size_t b,
+ enum bch_data_type type,
+ unsigned sectors)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a;
+ int ret = 0;
+
+ /*
+ * Backup superblock might be past the end of our normal usable space:
+ */
+ if (b >= ca->mi.nbuckets)
+ return 0;
+
+ a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ if (a->v.data_type && type && a->v.data_type != type) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ iter.pos.inode, iter.pos.offset, a->v.gen,
+ bch2_data_types[a->v.data_type],
+ bch2_data_types[type],
+ bch2_data_types[type]);
+ ret = -EIO;
+ goto out;
+ }
+
+ a->v.data_type = type;
+ a->v.dirty_sectors = sectors;
+
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
+ struct bch_dev *ca, size_t b,
+ enum bch_data_type type,
+ unsigned sectors)
+{
+ return commit_do(trans, NULL, NULL, 0,
+ __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
+}
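+
+/*
+ * The helper below walks [start, end) one bucket at a time, accumulating
+ * contiguous sectors that land in the same bucket into *bucket_sectors
+ * and issuing one btree update per bucket boundary crossed; the caller
+ * flushes the final partially-accumulated bucket.
+ */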
+
+static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
+ struct bch_dev *ca,
+ u64 start, u64 end,
+ enum bch_data_type type,
+ u64 *bucket, unsigned *bucket_sectors)
+{
+ do {
+ u64 b = sector_to_bucket(ca, start);
+ unsigned sectors =
+ min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
+
+ if (b != *bucket && *bucket_sectors) {
+ int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
+ type, *bucket_sectors);
+ if (ret)
+ return ret;
+
+ *bucket_sectors = 0;
+ }
+
+ *bucket = b;
+ *bucket_sectors += sectors;
+ start += sectors;
+ } while (start < end);
+
+ return 0;
+}
+
+static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
+ struct bch_dev *ca)
+{
+ struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+ u64 bucket = 0;
+ unsigned i, bucket_sectors = 0;
+ int ret;
+
+ for (i = 0; i < layout->nr_superblocks; i++) {
+ u64 offset = le64_to_cpu(layout->sb_offset[i]);
+
+ if (offset == BCH_SB_SECTOR) {
+ ret = bch2_trans_mark_metadata_sectors(trans, ca,
+ 0, BCH_SB_SECTOR,
+ BCH_DATA_sb, &bucket, &bucket_sectors);
+ if (ret)
+ return ret;
+ }
+
+ ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
+ offset + (1 << layout->sb_max_size_bits),
+ BCH_DATA_sb, &bucket, &bucket_sectors);
+ if (ret)
+ return ret;
+ }
+
+ if (bucket_sectors) {
+ ret = bch2_trans_mark_metadata_bucket(trans, ca,
+ bucket, BCH_DATA_sb, bucket_sectors);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ca->journal.nr; i++) {
+ ret = bch2_trans_mark_metadata_bucket(trans, ca,
+ ca->journal.buckets[i],
+ BCH_DATA_journal, ca->mi.bucket_size);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
+{
+ int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/* Disk reservations: */
+
+#define SECTORS_CACHE 1024
+
+int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+ u64 sectors, int flags)
+{
+ struct bch_fs_pcpu *pcpu;
+ u64 old, v, get;
+ s64 sectors_available;
+ int ret;
+
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+ pcpu = this_cpu_ptr(c->pcpu);
+
+ if (sectors <= pcpu->sectors_available)
+ goto out;
+
+ v = atomic64_read(&c->sectors_available);
+ do {
+ old = v;
+ get = min((u64) sectors + SECTORS_CACHE, old);
+
+ if (get < sectors) {
+ preempt_enable();
+ goto recalculate;
+ }
+ } while ((v = atomic64_cmpxchg(&c->sectors_available,
+ old, old - get)) != old);
+
+ pcpu->sectors_available += get;
+
+out:
+ pcpu->sectors_available -= sectors;
+ this_cpu_add(*c->online_reserved, sectors);
+ res->sectors += sectors;
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+ return 0;
+
+recalculate:
+ mutex_lock(&c->sectors_available_lock);
+
+ percpu_u64_set(&c->pcpu->sectors_available, 0);
+ sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);
+
+ if (sectors <= sectors_available ||
+ (flags & BCH_DISK_RESERVATION_NOFAIL)) {
+ atomic64_set(&c->sectors_available,
+ max_t(s64, 0, sectors_available - sectors));
+ this_cpu_add(*c->online_reserved, sectors);
+ res->sectors += sectors;
+ ret = 0;
+ } else {
+ atomic64_set(&c->sectors_available, sectors_available);
+ ret = -BCH_ERR_ENOSPC_disk_reservation;
+ }
+
+ mutex_unlock(&c->sectors_available_lock);
+ percpu_up_read(&c->mark_lock);
+
+ return ret;
+}
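+
+/*
+ * Fast path sketch for the function above: each CPU caches sectors
+ * taken from the global atomic c->sectors_available, so most
+ * reservations are a plain percpu subtraction.  E.g. a 16 sector
+ * reservation hitting an empty percpu cache pulls 16 + SECTORS_CACHE
+ * sectors from the global pool and leaves SECTORS_CACHE of them cached
+ * for subsequent calls on this CPU.
+ */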
+
+/* Startup/shutdown: */
+
+static void bucket_gens_free_rcu(struct rcu_head *rcu)
+{
+ struct bucket_gens *buckets =
+ container_of(rcu, struct bucket_gens, rcu);
+
+ kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
+}
+
+int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
+{
+ struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
+ unsigned long *buckets_nouse = NULL;
+ bool resize = ca->bucket_gens != NULL;
+ int ret;
+
+ if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
+ GFP_KERNEL|__GFP_ZERO))) {
+ ret = -BCH_ERR_ENOMEM_bucket_gens;
+ goto err;
+ }
+
+ if ((c->opts.buckets_nouse &&
+ !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO)))) {
+ ret = -BCH_ERR_ENOMEM_buckets_nouse;
+ goto err;
+ }
+
+ bucket_gens->first_bucket = ca->mi.first_bucket;
+ bucket_gens->nbuckets = nbuckets;
+
+ bch2_copygc_stop(c);
+
+ if (resize) {
+ down_write(&c->gc_lock);
+ down_write(&ca->bucket_lock);
+ percpu_down_write(&c->mark_lock);
+ }
+
+ old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);
+
+ if (resize) {
+ size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets);
+
+ memcpy(bucket_gens->b,
+ old_bucket_gens->b,
+ n);
+ if (buckets_nouse)
+ memcpy(buckets_nouse,
+ ca->buckets_nouse,
+ BITS_TO_LONGS(n) * sizeof(unsigned long));
+ }
+
+ rcu_assign_pointer(ca->bucket_gens, bucket_gens);
+ bucket_gens = old_bucket_gens;
+
+ swap(ca->buckets_nouse, buckets_nouse);
+
+ nbuckets = ca->mi.nbuckets;
+
+ if (resize) {
+ percpu_up_write(&c->mark_lock);
+ up_write(&ca->bucket_lock);
+ up_write(&c->gc_lock);
+ }
+
+ ret = 0;
+err:
+ kvpfree(buckets_nouse,
+ BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+ if (bucket_gens)
+ call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
+
+ return ret;
+}
+
+void bch2_dev_buckets_free(struct bch_dev *ca)
+{
+ unsigned i;
+
+ kvpfree(ca->buckets_nouse,
+ BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+ kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
+ sizeof(struct bucket_gens) + ca->mi.nbuckets);
+
+ for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
+ free_percpu(ca->usage[i]);
+ kfree(ca->usage_base);
+}
+
+int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ unsigned i;
+
+ ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
+ if (!ca->usage_base)
+ return -BCH_ERR_ENOMEM_usage_init;
+
+ for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
+ ca->usage[i] = alloc_percpu(struct bch_dev_usage);
+ if (!ca->usage[i])
+ return -BCH_ERR_ENOMEM_usage_init;
+ }
+
+ return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
+}
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
new file mode 100644
index 000000000000..bf8d7f407e9c
--- /dev/null
+++ b/fs/bcachefs/buckets.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code for manipulating bucket marks for garbage collection.
+ *
+ * Copyright 2014 Datera, Inc.
+ */
+
+#ifndef _BUCKETS_H
+#define _BUCKETS_H
+
+#include "buckets_types.h"
+#include "extents.h"
+#include "sb-members.h"
+
+static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
+{
+ return div_u64(s, ca->mi.bucket_size);
+}
+
+static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
+{
+ return ((sector_t) b) * ca->mi.bucket_size;
+}
+
+static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
+{
+ u32 remainder;
+
+ div_u64_rem(s, ca->mi.bucket_size, &remainder);
+ return remainder;
+}
+
+static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
+ u32 *offset)
+{
+ return div_u64_rem(s, ca->mi.bucket_size, offset);
+}
+
+#define for_each_bucket(_b, _buckets) \
+ for (_b = (_buckets)->b + (_buckets)->first_bucket; \
+ _b < (_buckets)->b + (_buckets)->nbuckets; _b++)
+
+/*
+ * Ugly hack alert:
+ *
+ * We need to cram a spinlock in a single byte, because that's what we have left
+ * in struct bucket, and we care about the size of these - during fsck, we need
+ * in-memory state for every single bucket on every device.
+ *
+ * We used to do
+ * while (xchg(&b->lock, 1)) cpu_relax();
+ * but, it turns out not all architectures support xchg on a single byte.
+ *
+ * So now we use bit_spin_lock(), with fun games since we can't burn a whole
+ * ulong for this - we just need to make sure the lock bit always ends up in the
+ * first byte.
+ */
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BUCKET_LOCK_BITNR 0
+#else
+#define BUCKET_LOCK_BITNR (BITS_PER_LONG - 1)
+#endif
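+
+/*
+ * Why these bit numbers: on little endian, bit 0 of an unsigned long
+ * lives in its lowest addressed byte; on big endian it's bit
+ * BITS_PER_LONG - 1 that does.  Either way the lock bit lands in byte
+ * 0, the one byte struct bucket reserves for it (see the BUILD_BUG_ON
+ * in bucket_unlock()).
+ */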
+
+union ulong_byte_assert {
+ ulong ulong;
+ u8 byte;
+};
+
+static inline void bucket_unlock(struct bucket *b)
+{
+ BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);
+
+ clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
+ wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
+}
+
+static inline void bucket_lock(struct bucket *b)
+{
+ wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
+ TASK_UNINTERRUPTIBLE);
+}
+
+static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
+{
+ return rcu_dereference_check(ca->buckets_gc,
+ !ca->fs ||
+ percpu_rwsem_is_held(&ca->fs->mark_lock) ||
+ lockdep_is_held(&ca->fs->gc_lock) ||
+ lockdep_is_held(&ca->bucket_lock));
+}
+
+static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
+{
+ struct bucket_array *buckets = gc_bucket_array(ca);
+
+ BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
+ return buckets->b + b;
+}
+
+static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
+{
+ return rcu_dereference_check(ca->bucket_gens,
+ !ca->fs ||
+ percpu_rwsem_is_held(&ca->fs->mark_lock) ||
+ lockdep_is_held(&ca->fs->gc_lock) ||
+ lockdep_is_held(&ca->bucket_lock));
+}
+
+static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
+{
+ struct bucket_gens *gens = bucket_gens(ca);
+
+ BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
+ return gens->b + b;
+}
+
+static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
+ const struct bch_extent_ptr *ptr)
+{
+ return sector_to_bucket(ca, ptr->offset);
+}
+
+static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
+ const struct bch_extent_ptr *ptr)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
+}
+
+static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
+ const struct bch_extent_ptr *ptr,
+ u32 *bucket_offset)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
+}
+
+static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
+ const struct bch_extent_ptr *ptr)
+{
+ return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
+}
+
+static inline enum bch_data_type ptr_data_type(const struct bkey *k,
+ const struct bch_extent_ptr *ptr)
+{
+ if (bkey_is_btree_ptr(k))
+ return BCH_DATA_btree;
+
+ return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
+}
+
+static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
+{
+ EBUG_ON(sectors < 0);
+
+ return crc_is_compressed(p.crc)
+ ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
+ p.crc.uncompressed_size)
+ : sectors;
+}
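+
+/*
+ * Example: for a 128 sector extent whose crc entry has compressed_size
+ * 32 and uncompressed_size 128, ptr_disk_sectors() returns
+ * DIV_ROUND_UP(128 * 32, 128) = 32; a partial overwrite of 10 sectors
+ * would charge DIV_ROUND_UP(10 * 32, 128) = 3, rounding up rather than
+ * undercounting.
+ */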
+
+static inline int gen_cmp(u8 a, u8 b)
+{
+ return (s8) (a - b);
+}
+
+static inline int gen_after(u8 a, u8 b)
+{
+ int r = gen_cmp(a, b);
+
+ return r > 0 ? r : 0;
+}
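+
+/*
+ * Generations are 8 bit and wrap: the difference is computed mod 256
+ * and reinterpreted as signed, so e.g. gen_cmp(1, 254) = (s8) (1 - 254)
+ * = 3, correctly treating gen 1 as three generations newer than 254.
+ */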
+
+/**
+ * ptr_stale() - check if a pointer points into a bucket that has been
+ * invalidated.
+ * @ca:		device the pointer refers to
+ * @ptr:	extent pointer to check
+ *
+ * Return: how many generations the bucket has been invalidated since @ptr was
+ * written, or 0 if the pointer is still valid.
+ */
+static inline u8 ptr_stale(struct bch_dev *ca,
+ const struct bch_extent_ptr *ptr)
+{
+ u8 ret;
+
+ rcu_read_lock();
+ ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+/* Device usage: */
+
+void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
+static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
+{
+ struct bch_dev_usage ret;
+
+ bch2_dev_usage_read_fast(ca, &ret);
+ return ret;
+}
+
+void bch2_dev_usage_init(struct bch_dev *);
+
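+/*
+ * Note the fallthroughs below: each watermark's reserve includes the reserves
+ * of all stricter watermarks beneath it. E.g. for BCH_WATERMARK_stripe the
+ * total is nbuckets/64 (stripe) + nbuckets/64 (normal) + 2 * nr_btree_reserve.
+ */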
+static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
+{
+ s64 reserved = 0;
+
+ switch (watermark) {
+ case BCH_WATERMARK_NR:
+ BUG();
+ case BCH_WATERMARK_stripe:
+ reserved += ca->mi.nbuckets >> 6;
+ fallthrough;
+ case BCH_WATERMARK_normal:
+ reserved += ca->mi.nbuckets >> 6;
+ fallthrough;
+ case BCH_WATERMARK_copygc:
+ reserved += ca->nr_btree_reserve;
+ fallthrough;
+ case BCH_WATERMARK_btree:
+ reserved += ca->nr_btree_reserve;
+ fallthrough;
+ case BCH_WATERMARK_btree_copygc:
+ case BCH_WATERMARK_reclaim:
+ break;
+ }
+
+ return reserved;
+}
+
+static inline u64 dev_buckets_free(struct bch_dev *ca,
+ struct bch_dev_usage usage,
+ enum bch_watermark watermark)
+{
+ return max_t(s64, 0,
+ usage.d[BCH_DATA_free].buckets -
+ ca->nr_open_buckets -
+ bch2_dev_buckets_reserved(ca, watermark));
+}
+
+static inline u64 __dev_buckets_available(struct bch_dev *ca,
+ struct bch_dev_usage usage,
+ enum bch_watermark watermark)
+{
+ return max_t(s64, 0,
+ usage.d[BCH_DATA_free].buckets
+ + usage.d[BCH_DATA_cached].buckets
+ + usage.d[BCH_DATA_need_gc_gens].buckets
+ + usage.d[BCH_DATA_need_discard].buckets
+ - ca->nr_open_buckets
+ - bch2_dev_buckets_reserved(ca, watermark));
+}
+
+static inline u64 dev_buckets_available(struct bch_dev *ca,
+ enum bch_watermark watermark)
+{
+ return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
+}
+
+/* Filesystem usage: */
+
+static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
+{
+ return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
+}
+
+static inline unsigned fs_usage_u64s(struct bch_fs *c)
+{
+ return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
+}
+
+static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
+{
+ return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
+}
+
+static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
+{
+ return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
+}
+
+static inline unsigned dev_usage_u64s(void)
+{
+ return sizeof(struct bch_dev_usage) / sizeof(u64);
+}
+
+u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);
+
+struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);
+
+void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);
+
+void bch2_fs_usage_to_text(struct printbuf *,
+ struct bch_fs *, struct bch_fs_usage_online *);
+
+u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);
+
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *);
+
+/* key/bucket marking: */
+
+static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
+ unsigned journal_seq,
+ bool gc)
+{
+ percpu_rwsem_assert_held(&c->mark_lock);
+ BUG_ON(!gc && !journal_seq);
+
+ return this_cpu_ptr(gc
+ ? c->usage_gc
+ : c->usage[journal_seq & JOURNAL_BUF_MASK]);
+}
+
+int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);
+
+void bch2_fs_usage_initialize(struct bch_fs *);
+
+int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
+ size_t, enum bch_data_type, unsigned,
+ struct gc_pos, unsigned);
+
+int bch2_mark_alloc(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_extent(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_stripe(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_reservation(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+int bch2_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+
+int bch2_trans_mark_extent(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_stripe(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
+
+void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
+int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
+
+int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
+ size_t, enum bch_data_type, unsigned);
+int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
+
+static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
+{
+ struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+ u64 b_offset = bucket_to_sector(ca, b);
+ u64 b_end = bucket_to_sector(ca, b + 1);
+ unsigned i;
+
+ if (!b)
+ return true;
+
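+	/* check [b_offset, b_end) against each superblock range for overlap: */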
+ for (i = 0; i < layout->nr_superblocks; i++) {
+ u64 offset = le64_to_cpu(layout->sb_offset[i]);
+ u64 end = offset + (1 << layout->sb_max_size_bits);
+
+ if (!(offset >= b_end || end <= b_offset))
+ return true;
+ }
+
+ return false;
+}
+
+/* disk reservations: */
+
+static inline void bch2_disk_reservation_put(struct bch_fs *c,
+ struct disk_reservation *res)
+{
+ if (res->sectors) {
+ this_cpu_sub(*c->online_reserved, res->sectors);
+ res->sectors = 0;
+ }
+}
+
+#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
+
+int __bch2_disk_reservation_add(struct bch_fs *,
+ struct disk_reservation *,
+ u64, int);
+
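+/*
+ * Fast path: a lockless cmpxchg loop that tries to satisfy the reservation
+ * from this CPU's sectors_available cache; if the cache can't cover the
+ * request, fall back to __bch2_disk_reservation_add().
+ */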
+static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
+ u64 sectors, int flags)
+{
+#ifdef __KERNEL__
+ u64 old, new;
+
+ do {
+ old = this_cpu_read(c->pcpu->sectors_available);
+ if (sectors > old)
+ return __bch2_disk_reservation_add(c, res, sectors, flags);
+
+ new = old - sectors;
+ } while (this_cpu_cmpxchg(c->pcpu->sectors_available, old, new) != old);
+
+ this_cpu_add(*c->online_reserved, sectors);
+ res->sectors += sectors;
+ return 0;
+#else
+ return __bch2_disk_reservation_add(c, res, sectors, flags);
+#endif
+}
+
+static inline struct disk_reservation
+bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
+{
+ return (struct disk_reservation) {
+ .sectors = 0,
+#if 0
+ /* not used yet: */
+ .gen = c->capacity_gen,
+#endif
+ .nr_replicas = nr_replicas,
+ };
+}
+
+static inline int bch2_disk_reservation_get(struct bch_fs *c,
+ struct disk_reservation *res,
+ u64 sectors, unsigned nr_replicas,
+ int flags)
+{
+ *res = bch2_disk_reservation_init(c, nr_replicas);
+
+ return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
+}
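+
+/*
+ * Sketch of the intended calling pattern (illustrative, not compiled):
+ *
+ *	struct disk_reservation res;
+ *	int ret = bch2_disk_reservation_get(c, &res, sectors, nr_replicas, 0);
+ *	if (ret)
+ *		return ret;
+ *	(do the write)
+ *	bch2_disk_reservation_put(c, &res);
+ */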
+
+#define RESERVE_FACTOR 6
+
+static inline u64 avail_factor(u64 r)
+{
+ return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
+}
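+
+/*
+ * avail_factor() scales capacity by 64/65, holding back ~1.5% as reserve:
+ * e.g. avail_factor(65 << 10) == 64 << 10.
+ */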
+
+int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
+void bch2_dev_buckets_free(struct bch_dev *);
+int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);
+
+#endif /* _BUCKETS_H */
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
new file mode 100644
index 000000000000..2a9dab9006ef
--- /dev/null
+++ b/fs/bcachefs/buckets_types.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BUCKETS_TYPES_H
+#define _BUCKETS_TYPES_H
+
+#include "bcachefs_format.h"
+#include "util.h"
+
+#define BUCKET_JOURNAL_SEQ_BITS 16
+
+struct bucket {
+ u8 lock;
+ u8 gen_valid:1;
+ u8 data_type:7;
+ u8 gen;
+ u8 stripe_redundancy;
+ u32 stripe;
+ u32 dirty_sectors;
+ u32 cached_sectors;
+};
+
+struct bucket_array {
+ struct rcu_head rcu;
+ u16 first_bucket;
+ size_t nbuckets;
+ struct bucket b[];
+};
+
+struct bucket_gens {
+ struct rcu_head rcu;
+ u16 first_bucket;
+ size_t nbuckets;
+ u8 b[];
+};
+
+struct bch_dev_usage {
+ u64 buckets_ec;
+
+ struct {
+ u64 buckets;
+ u64 sectors; /* _compressed_ sectors: */
+ /*
+ * XXX
+ * Why do we have this? Isn't it just buckets * bucket_size -
+ * sectors?
+ */
+ u64 fragmented;
+ } d[BCH_DATA_NR];
+};
+
+struct bch_fs_usage {
+ /* all fields are in units of 512 byte sectors: */
+ u64 hidden;
+ u64 btree;
+ u64 data;
+ u64 cached;
+ u64 reserved;
+ u64 nr_inodes;
+
+ /* XXX: add stats for compression ratio */
+#if 0
+ u64 uncompressed;
+ u64 compressed;
+#endif
+
+ /* broken out: */
+
+ u64 persistent_reserved[BCH_REPLICAS_MAX];
+ u64 replicas[];
+};
+
+struct bch_fs_usage_online {
+ u64 online_reserved;
+ struct bch_fs_usage u;
+};
+
+struct bch_fs_usage_short {
+ u64 capacity;
+ u64 used;
+ u64 free;
+ u64 nr_inodes;
+};
+
+/*
+ * A reservation for space on disk:
+ */
+struct disk_reservation {
+ u64 sectors;
+ u32 gen;
+ unsigned nr_replicas;
+};
+
+#endif /* _BUCKETS_TYPES_H */
diff --git a/fs/bcachefs/buckets_waiting_for_journal.c b/fs/bcachefs/buckets_waiting_for_journal.c
new file mode 100644
index 000000000000..ec1b636ef78d
--- /dev/null
+++ b/fs/bcachefs/buckets_waiting_for_journal.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "buckets_waiting_for_journal.h"
+#include <linux/hash.h>
+#include <linux/random.h>
+
+static inline struct bucket_hashed *
+bucket_hash(struct buckets_waiting_for_journal_table *t,
+ unsigned hash_seed_idx, u64 dev_bucket)
+{
+ return t->d + hash_64(dev_bucket ^ t->hash_seeds[hash_seed_idx], t->bits);
+}
+
+static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_t bits)
+{
+ unsigned i;
+
+ t->bits = bits;
+ for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++)
+ get_random_bytes(&t->hash_seeds[i], sizeof(t->hash_seeds[i]));
+ memset(t->d, 0, sizeof(t->d[0]) << t->bits);
+}
+
+bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
+ u64 flushed_seq,
+ unsigned dev, u64 bucket)
+{
+ struct buckets_waiting_for_journal_table *t;
+ u64 dev_bucket = (u64) dev << 56 | bucket;
+ bool ret = false;
+ unsigned i;
+
+ mutex_lock(&b->lock);
+ t = b->t;
+
+ for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
+ struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);
+
+ if (h->dev_bucket == dev_bucket) {
+ ret = h->journal_seq > flushed_seq;
+ break;
+ }
+ }
+
+ mutex_unlock(&b->lock);
+
+ return ret;
+}
+
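+/*
+ * Cuckoo-hash style insert: each element has ARRAY_SIZE(t->hash_seeds) == 3
+ * candidate slots. If every candidate is occupied by a still-live entry
+ * (journal_seq > flushed_seq), we evict one, re-insert the victim, and repeat
+ * up to 10 times before telling the caller to rehash into a bigger table.
+ */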
+static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t,
+ struct bucket_hashed *new,
+ u64 flushed_seq)
+{
+ struct bucket_hashed *last_evicted = NULL;
+ unsigned tries, i;
+
+ for (tries = 0; tries < 10; tries++) {
+ struct bucket_hashed *old, *victim = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
+ old = bucket_hash(t, i, new->dev_bucket);
+
+ if (old->dev_bucket == new->dev_bucket ||
+ old->journal_seq <= flushed_seq) {
+ *old = *new;
+ return true;
+ }
+
+ if (last_evicted != old)
+ victim = old;
+ }
+
+ /* hashed to same slot 3 times: */
+ if (!victim)
+ break;
+
+ /* Failed to find an empty slot: */
+ swap(*new, *victim);
+ last_evicted = victim;
+ }
+
+ return false;
+}
+
+int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
+ u64 flushed_seq,
+ unsigned dev, u64 bucket,
+ u64 journal_seq)
+{
+ struct buckets_waiting_for_journal_table *t, *n;
+ struct bucket_hashed tmp, new = {
+ .dev_bucket = (u64) dev << 56 | bucket,
+ .journal_seq = journal_seq,
+ };
+ size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0;
+ int ret = 0;
+
+ mutex_lock(&b->lock);
+
+ if (likely(bucket_table_insert(b->t, &new, flushed_seq)))
+ goto out;
+
+ t = b->t;
+ size = 1UL << t->bits;
+ for (i = 0; i < size; i++)
+ nr_elements += t->d[i].journal_seq > flushed_seq;
+
+ new_bits = t->bits + (nr_elements * 3 > size);
+
+ n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
+ if (!n) {
+ ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
+ goto out;
+ }
+
+retry_rehash:
+ nr_rehashes++;
+ bucket_table_init(n, new_bits);
+
+ tmp = new;
+ BUG_ON(!bucket_table_insert(n, &tmp, flushed_seq));
+
+ for (i = 0; i < 1UL << t->bits; i++) {
+ if (t->d[i].journal_seq <= flushed_seq)
+ continue;
+
+ tmp = t->d[i];
+ if (!bucket_table_insert(n, &tmp, flushed_seq))
+ goto retry_rehash;
+ }
+
+ b->t = n;
+ kvfree(t);
+
+ pr_debug("took %zu rehashes, table at %zu/%lu elements",
+ nr_rehashes, nr_elements, 1UL << b->t->bits);
+out:
+ mutex_unlock(&b->lock);
+
+ return ret;
+}
+
+void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c)
+{
+ struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
+
+ kvfree(b->t);
+}
+
+#define INITIAL_TABLE_BITS 3
+
+int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
+{
+ struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
+
+ mutex_init(&b->lock);
+
+ b->t = kvmalloc(sizeof(*b->t) +
+ (sizeof(b->t->d[0]) << INITIAL_TABLE_BITS), GFP_KERNEL);
+ if (!b->t)
+ return -BCH_ERR_ENOMEM_buckets_waiting_for_journal_init;
+
+ bucket_table_init(b->t, INITIAL_TABLE_BITS);
+ return 0;
+}
diff --git a/fs/bcachefs/buckets_waiting_for_journal.h b/fs/bcachefs/buckets_waiting_for_journal.h
new file mode 100644
index 000000000000..d2ae19cbe18c
--- /dev/null
+++ b/fs/bcachefs/buckets_waiting_for_journal.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BUCKETS_WAITING_FOR_JOURNAL_H
+#define _BUCKETS_WAITING_FOR_JOURNAL_H
+
+#include "buckets_waiting_for_journal_types.h"
+
+bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
+ u64, unsigned, u64);
+int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
+ u64, unsigned, u64, u64);
+
+void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *);
+int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *);
+
+#endif /* _BUCKETS_WAITING_FOR_JOURNAL_H */
diff --git a/fs/bcachefs/buckets_waiting_for_journal_types.h b/fs/bcachefs/buckets_waiting_for_journal_types.h
new file mode 100644
index 000000000000..e593db061d81
--- /dev/null
+++ b/fs/bcachefs/buckets_waiting_for_journal_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H
+#define _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H
+
+#include <linux/siphash.h>
+
+struct bucket_hashed {
+ u64 dev_bucket;
+ u64 journal_seq;
+};
+
+struct buckets_waiting_for_journal_table {
+ unsigned bits;
+ u64 hash_seeds[3];
+ struct bucket_hashed d[];
+};
+
+struct buckets_waiting_for_journal {
+ struct mutex lock;
+ struct buckets_waiting_for_journal_table *t;
+};
+
+#endif /* _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H */
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
new file mode 100644
index 000000000000..f69e15dc699c
--- /dev/null
+++ b/fs/bcachefs/chardev.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_CHARDEV
+
+#include "bcachefs.h"
+#include "bcachefs_ioctl.h"
+#include "buckets.h"
+#include "chardev.h"
+#include "journal.h"
+#include "move.h"
+#include "replicas.h"
+#include "super.h"
+#include "super-io.h"
+
+#include <linux/anon_inodes.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/kthread.h>
+#include <linux/major.h>
+#include <linux/sched/task.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+/* returns with ref on ca->ref */
+static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
+ unsigned flags)
+{
+ struct bch_dev *ca;
+
+ if (flags & BCH_BY_INDEX) {
+ if (dev >= c->sb.nr_devices)
+ return ERR_PTR(-EINVAL);
+
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca)
+ percpu_ref_get(&ca->ref);
+ rcu_read_unlock();
+
+ if (!ca)
+ return ERR_PTR(-EINVAL);
+ } else {
+ char *path;
+
+ path = strndup_user((const char __user *)
+ (unsigned long) dev, PATH_MAX);
+ if (IS_ERR(path))
+ return ERR_CAST(path);
+
+ ca = bch2_dev_lookup(c, path);
+ kfree(path);
+ }
+
+ return ca;
+}
+
+#if 0
+static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
+{
+ struct bch_ioctl_assemble arg;
+ struct bch_fs *c;
+ u64 *user_devs = NULL;
+ char **devs = NULL;
+ unsigned i;
+ int ret = -EFAULT;
+
+ if (copy_from_user(&arg, user_arg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags || arg.pad)
+ return -EINVAL;
+
+ user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);
+ if (!user_devs)
+ return -ENOMEM;
+
+	devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);
+	if (!devs) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+ if (copy_from_user(user_devs, user_arg->devs,
+ sizeof(u64) * arg.nr_devs))
+ goto err;
+
+ for (i = 0; i < arg.nr_devs; i++) {
+ devs[i] = strndup_user((const char __user *)(unsigned long)
+ user_devs[i],
+ PATH_MAX);
+		ret = PTR_ERR_OR_ZERO(devs[i]);
+ if (ret)
+ goto err;
+ }
+
+ c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
+ ret = PTR_ERR_OR_ZERO(c);
+ if (!ret)
+ closure_put(&c->cl);
+err:
+ if (devs)
+ for (i = 0; i < arg.nr_devs; i++)
+ kfree(devs[i]);
+	kfree(devs);
+	kfree(user_devs);
+ return ret;
+}
+
+static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
+{
+ struct bch_ioctl_incremental arg;
+ const char *err;
+	char *path;
+	int ret;
+
+ if (copy_from_user(&arg, user_arg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags || arg.pad)
+ return -EINVAL;
+
+ path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
+ ret = PTR_ERR_OR_ZERO(path);
+ if (ret)
+ return ret;
+
+ err = bch2_fs_open_incremental(path);
+ kfree(path);
+
+ if (err) {
+ pr_err("Could not register bcachefs devices: %s", err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+static long bch2_global_ioctl(unsigned cmd, void __user *arg)
+{
+ switch (cmd) {
+#if 0
+ case BCH_IOCTL_ASSEMBLE:
+ return bch2_ioctl_assemble(arg);
+ case BCH_IOCTL_INCREMENTAL:
+ return bch2_ioctl_incremental(arg);
+#endif
+ default:
+ return -ENOTTY;
+ }
+}
+
+static long bch2_ioctl_query_uuid(struct bch_fs *c,
+ struct bch_ioctl_query_uuid __user *user_arg)
+{
+ if (copy_to_user(&user_arg->uuid, &c->sb.user_uuid,
+ sizeof(c->sb.user_uuid)))
+ return -EFAULT;
+ return 0;
+}
+
+#if 0
+static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (arg.flags || arg.pad)
+ return -EINVAL;
+
+ return bch2_fs_start(c);
+}
+
+static long bch2_ioctl_stop(struct bch_fs *c)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ bch2_fs_stop(c);
+ return 0;
+}
+#endif
+
+static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
+{
+ char *path;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (arg.flags || arg.pad)
+ return -EINVAL;
+
+ path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
+ ret = PTR_ERR_OR_ZERO(path);
+ if (ret)
+ return ret;
+
+ ret = bch2_dev_add(c, path);
+ kfree(path);
+
+ return ret;
+}
+
+static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg)
+{
+ struct bch_dev *ca;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
+ BCH_FORCE_IF_METADATA_LOST|
+ BCH_FORCE_IF_DEGRADED|
+ BCH_BY_INDEX)) ||
+ arg.pad)
+ return -EINVAL;
+
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+ if (IS_ERR(ca))
+ return PTR_ERR(ca);
+
+ return bch2_dev_remove(c, ca, arg.flags);
+}
+
+static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg)
+{
+ char *path;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (arg.flags || arg.pad)
+ return -EINVAL;
+
+ path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
+ ret = PTR_ERR_OR_ZERO(path);
+ if (ret)
+ return ret;
+
+ ret = bch2_dev_online(c, path);
+ kfree(path);
+ return ret;
+}
+
+static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
+{
+ struct bch_dev *ca;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
+ BCH_FORCE_IF_METADATA_LOST|
+ BCH_FORCE_IF_DEGRADED|
+ BCH_BY_INDEX)) ||
+ arg.pad)
+ return -EINVAL;
+
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+ if (IS_ERR(ca))
+ return PTR_ERR(ca);
+
+ ret = bch2_dev_offline(c, ca, arg.flags);
+ percpu_ref_put(&ca->ref);
+ return ret;
+}
+
+static long bch2_ioctl_disk_set_state(struct bch_fs *c,
+ struct bch_ioctl_disk_set_state arg)
+{
+ struct bch_dev *ca;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
+ BCH_FORCE_IF_METADATA_LOST|
+ BCH_FORCE_IF_DEGRADED|
+ BCH_BY_INDEX)) ||
+ arg.pad[0] || arg.pad[1] || arg.pad[2] ||
+ arg.new_state >= BCH_MEMBER_STATE_NR)
+ return -EINVAL;
+
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+ if (IS_ERR(ca))
+ return PTR_ERR(ca);
+
+ ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
+ if (ret)
+ bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
+
+ percpu_ref_put(&ca->ref);
+ return ret;
+}
+
+struct bch_data_ctx {
+ struct bch_fs *c;
+ struct bch_ioctl_data arg;
+ struct bch_move_stats stats;
+
+ int ret;
+
+ struct task_struct *thread;
+};
+
+static int bch2_data_thread(void *arg)
+{
+ struct bch_data_ctx *ctx = arg;
+
+ ctx->ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
+
+ ctx->stats.data_type = U8_MAX;
+ return 0;
+}
+
+static int bch2_data_job_release(struct inode *inode, struct file *file)
+{
+ struct bch_data_ctx *ctx = file->private_data;
+
+ kthread_stop(ctx->thread);
+ put_task_struct(ctx->thread);
+ kfree(ctx);
+ return 0;
+}
+
+static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct bch_data_ctx *ctx = file->private_data;
+ struct bch_fs *c = ctx->c;
+ struct bch_ioctl_data_event e = {
+ .type = BCH_DATA_EVENT_PROGRESS,
+ .p.data_type = ctx->stats.data_type,
+ .p.btree_id = ctx->stats.btree_id,
+ .p.pos = ctx->stats.pos,
+ .p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
+ .p.sectors_total = bch2_fs_usage_read_short(c).used,
+ };
+
+ if (len < sizeof(e))
+ return -EINVAL;
+
+ if (copy_to_user(buf, &e, sizeof(e)))
+ return -EFAULT;
+
+ return sizeof(e);
+}
+
+static const struct file_operations bcachefs_data_ops = {
+ .release = bch2_data_job_release,
+ .read = bch2_data_job_read,
+ .llseek = no_llseek,
+};
+
+static long bch2_ioctl_data(struct bch_fs *c,
+ struct bch_ioctl_data arg)
+{
+ struct bch_data_ctx *ctx = NULL;
+ struct file *file = NULL;
+ unsigned flags = O_RDONLY|O_CLOEXEC|O_NONBLOCK;
+ int ret, fd = -1;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (arg.op >= BCH_DATA_OP_NR || arg.flags)
+ return -EINVAL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->c = c;
+ ctx->arg = arg;
+
+ ctx->thread = kthread_create(bch2_data_thread, ctx,
+ "bch-data/%s", c->name);
+ if (IS_ERR(ctx->thread)) {
+ ret = PTR_ERR(ctx->thread);
+ goto err;
+ }
+
+ ret = get_unused_fd_flags(flags);
+ if (ret < 0)
+ goto err;
+ fd = ret;
+
+ file = anon_inode_getfile("[bcachefs]", &bcachefs_data_ops, ctx, flags);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err;
+ }
+
+ fd_install(fd, file);
+
+ get_task_struct(ctx->thread);
+ wake_up_process(ctx->thread);
+
+ return fd;
+err:
+ if (fd >= 0)
+ put_unused_fd(fd);
+ if (!IS_ERR_OR_NULL(ctx->thread))
+ kthread_stop(ctx->thread);
+ kfree(ctx);
+ return ret;
+}
+
+static long bch2_ioctl_fs_usage(struct bch_fs *c,
+ struct bch_ioctl_fs_usage __user *user_arg)
+{
+ struct bch_ioctl_fs_usage *arg = NULL;
+ struct bch_replicas_usage *dst_e, *dst_end;
+ struct bch_fs_usage_online *src;
+ u32 replica_entries_bytes;
+ unsigned i;
+ int ret = 0;
+
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
+ return -EINVAL;
+
+ if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
+ return -EFAULT;
+
+ arg = kzalloc(size_add(sizeof(*arg), replica_entries_bytes), GFP_KERNEL);
+ if (!arg)
+ return -ENOMEM;
+
+ src = bch2_fs_usage_read(c);
+ if (!src) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ arg->capacity = c->capacity;
+ arg->used = bch2_fs_sectors_used(c, src);
+ arg->online_reserved = src->online_reserved;
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ arg->persistent_reserved[i] = src->u.persistent_reserved[i];
+
+ dst_e = arg->replicas;
+ dst_end = (void *) arg->replicas + replica_entries_bytes;
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *src_e =
+ cpu_replicas_entry(&c->replicas, i);
+
+ /* check that we have enough space for one replicas entry */
+ if (dst_e + 1 > dst_end) {
+ ret = -ERANGE;
+ break;
+ }
+
+ dst_e->sectors = src->u.replicas[i];
+ dst_e->r = *src_e;
+
+ /* recheck after setting nr_devs: */
+ if (replicas_usage_next(dst_e) > dst_end) {
+ ret = -ERANGE;
+ break;
+ }
+
+ memcpy(dst_e->r.devs, src_e->devs, src_e->nr_devs);
+
+ dst_e = replicas_usage_next(dst_e);
+ }
+
+ arg->replica_entries_bytes = (void *) dst_e - (void *) arg->replicas;
+
+ percpu_up_read(&c->mark_lock);
+ kfree(src);
+
+ if (ret)
+ goto err;
+ if (copy_to_user(user_arg, arg,
+ sizeof(*arg) + arg->replica_entries_bytes))
+ ret = -EFAULT;
+err:
+ kfree(arg);
+ return ret;
+}
+
+static long bch2_ioctl_dev_usage(struct bch_fs *c,
+ struct bch_ioctl_dev_usage __user *user_arg)
+{
+ struct bch_ioctl_dev_usage arg;
+ struct bch_dev_usage src;
+ struct bch_dev *ca;
+ unsigned i;
+
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
+ return -EINVAL;
+
+ if (copy_from_user(&arg, user_arg, sizeof(arg)))
+ return -EFAULT;
+
+ if ((arg.flags & ~BCH_BY_INDEX) ||
+ arg.pad[0] ||
+ arg.pad[1] ||
+ arg.pad[2])
+ return -EINVAL;
+
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+ if (IS_ERR(ca))
+ return PTR_ERR(ca);
+
+ src = bch2_dev_usage_read(ca);
+
+ arg.state = ca->mi.state;
+ arg.bucket_size = ca->mi.bucket_size;
+ arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
+ arg.buckets_ec = src.buckets_ec;
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ arg.d[i].buckets = src.d[i].buckets;
+ arg.d[i].sectors = src.d[i].sectors;
+ arg.d[i].fragmented = src.d[i].fragmented;
+ }
+
+ percpu_ref_put(&ca->ref);
+
+ if (copy_to_user(user_arg, &arg, sizeof(arg)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long bch2_ioctl_read_super(struct bch_fs *c,
+ struct bch_ioctl_read_super arg)
+{
+ struct bch_dev *ca = NULL;
+ struct bch_sb *sb;
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
+ arg.pad)
+ return -EINVAL;
+
+ mutex_lock(&c->sb_lock);
+
+ if (arg.flags & BCH_READ_DEV) {
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+
+ if (IS_ERR(ca)) {
+ ret = PTR_ERR(ca);
+ goto err;
+ }
+
+ sb = ca->disk_sb.sb;
+ } else {
+ sb = c->disk_sb.sb;
+ }
+
+ if (vstruct_bytes(sb) > arg.size) {
+ ret = -ERANGE;
+ goto err;
+ }
+
+ if (copy_to_user((void __user *)(unsigned long)arg.sb, sb,
+ vstruct_bytes(sb)))
+ ret = -EFAULT;
+err:
+ if (!IS_ERR_OR_NULL(ca))
+ percpu_ref_put(&ca->ref);
+ mutex_unlock(&c->sb_lock);
+ return ret;
+}
+
+static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
+ struct bch_ioctl_disk_get_idx arg)
+{
+ dev_t dev = huge_decode_dev(arg.dev);
+ struct bch_dev *ca;
+ unsigned i;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!dev)
+ return -EINVAL;
+
+ for_each_online_member(ca, c, i)
+ if (ca->dev == dev) {
+ percpu_ref_put(&ca->io_ref);
+ return i;
+ }
+
+ return -BCH_ERR_ENOENT_dev_idx_not_found;
+}
+
+static long bch2_ioctl_disk_resize(struct bch_fs *c,
+ struct bch_ioctl_disk_resize arg)
+{
+ struct bch_dev *ca;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((arg.flags & ~BCH_BY_INDEX) ||
+ arg.pad)
+ return -EINVAL;
+
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+ if (IS_ERR(ca))
+ return PTR_ERR(ca);
+
+ ret = bch2_dev_resize(c, ca, arg.nbuckets);
+
+ percpu_ref_put(&ca->ref);
+ return ret;
+}
+
+static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
+ struct bch_ioctl_disk_resize_journal arg)
+{
+ struct bch_dev *ca;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((arg.flags & ~BCH_BY_INDEX) ||
+ arg.pad)
+ return -EINVAL;
+
+ if (arg.nbuckets > U32_MAX)
+ return -EINVAL;
+
+ ca = bch2_device_lookup(c, arg.dev, arg.flags);
+ if (IS_ERR(ca))
+ return PTR_ERR(ca);
+
+ ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
+
+ percpu_ref_put(&ca->ref);
+ return ret;
+}
+
+#define BCH_IOCTL(_name, _argtype) \
+do { \
+ _argtype i; \
+ \
+ if (copy_from_user(&i, arg, sizeof(i))) \
+ return -EFAULT; \
+ ret = bch2_ioctl_##_name(c, i); \
+ goto out; \
+} while (0)
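+
+/*
+ * E.g. BCH_IOCTL(disk_add, struct bch_ioctl_disk) copies a struct
+ * bch_ioctl_disk in from userspace and dispatches to bch2_ioctl_disk_add();
+ * the goto funnels every arm through the common error-class conversion at
+ * out: below.
+ */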
+
+long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case BCH_IOCTL_QUERY_UUID:
+ return bch2_ioctl_query_uuid(c, arg);
+ case BCH_IOCTL_FS_USAGE:
+ return bch2_ioctl_fs_usage(c, arg);
+ case BCH_IOCTL_DEV_USAGE:
+ return bch2_ioctl_dev_usage(c, arg);
+#if 0
+ case BCH_IOCTL_START:
+ BCH_IOCTL(start, struct bch_ioctl_start);
+ case BCH_IOCTL_STOP:
+ return bch2_ioctl_stop(c);
+#endif
+ case BCH_IOCTL_READ_SUPER:
+ BCH_IOCTL(read_super, struct bch_ioctl_read_super);
+ case BCH_IOCTL_DISK_GET_IDX:
+ BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
+ }
+
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
+ return -EINVAL;
+
+ switch (cmd) {
+ case BCH_IOCTL_DISK_ADD:
+ BCH_IOCTL(disk_add, struct bch_ioctl_disk);
+ case BCH_IOCTL_DISK_REMOVE:
+ BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
+ case BCH_IOCTL_DISK_ONLINE:
+ BCH_IOCTL(disk_online, struct bch_ioctl_disk);
+ case BCH_IOCTL_DISK_OFFLINE:
+ BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
+ case BCH_IOCTL_DISK_SET_STATE:
+ BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
+ case BCH_IOCTL_DATA:
+ BCH_IOCTL(data, struct bch_ioctl_data);
+ case BCH_IOCTL_DISK_RESIZE:
+ BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
+ case BCH_IOCTL_DISK_RESIZE_JOURNAL:
+ BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
+
+ default:
+ return -ENOTTY;
+ }
+out:
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+ return ret;
+}
+
+static DEFINE_IDR(bch_chardev_minor);
+
+static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
+{
+ unsigned minor = iminor(file_inode(filp));
+ struct bch_fs *c = minor < U8_MAX ? idr_find(&bch_chardev_minor, minor) : NULL;
+ void __user *arg = (void __user *) v;
+
+ return c
+ ? bch2_fs_ioctl(c, cmd, arg)
+ : bch2_global_ioctl(cmd, arg);
+}
+
+static const struct file_operations bch_chardev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = bch2_chardev_ioctl,
+ .open = nonseekable_open,
+};
+
+static int bch_chardev_major;
+static struct class *bch_chardev_class;
+static struct device *bch_chardev;
+
+void bch2_fs_chardev_exit(struct bch_fs *c)
+{
+ if (!IS_ERR_OR_NULL(c->chardev))
+ device_unregister(c->chardev);
+ if (c->minor >= 0)
+ idr_remove(&bch_chardev_minor, c->minor);
+}
+
+int bch2_fs_chardev_init(struct bch_fs *c)
+{
+ c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
+ if (c->minor < 0)
+ return c->minor;
+
+ c->chardev = device_create(bch_chardev_class, NULL,
+ MKDEV(bch_chardev_major, c->minor), c,
+ "bcachefs%u-ctl", c->minor);
+ if (IS_ERR(c->chardev))
+ return PTR_ERR(c->chardev);
+
+ return 0;
+}
+
+void bch2_chardev_exit(void)
+{
+ if (!IS_ERR_OR_NULL(bch_chardev_class))
+ device_destroy(bch_chardev_class,
+ MKDEV(bch_chardev_major, U8_MAX));
+ if (!IS_ERR_OR_NULL(bch_chardev_class))
+ class_destroy(bch_chardev_class);
+ if (bch_chardev_major > 0)
+ unregister_chrdev(bch_chardev_major, "bcachefs");
+}
+
+int __init bch2_chardev_init(void)
+{
+ bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
+ if (bch_chardev_major < 0)
+ return bch_chardev_major;
+
+ bch_chardev_class = class_create("bcachefs");
+ if (IS_ERR(bch_chardev_class))
+ return PTR_ERR(bch_chardev_class);
+
+ bch_chardev = device_create(bch_chardev_class, NULL,
+ MKDEV(bch_chardev_major, U8_MAX),
+ NULL, "bcachefs-ctl");
+ if (IS_ERR(bch_chardev))
+ return PTR_ERR(bch_chardev);
+
+ return 0;
+}
+
+#endif /* NO_BCACHEFS_CHARDEV */
diff --git a/fs/bcachefs/chardev.h b/fs/bcachefs/chardev.h
new file mode 100644
index 000000000000..0f563ca53c36
--- /dev/null
+++ b/fs/bcachefs/chardev.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_CHARDEV_H
+#define _BCACHEFS_CHARDEV_H
+
+#ifndef NO_BCACHEFS_FS
+
+long bch2_fs_ioctl(struct bch_fs *, unsigned, void __user *);
+
+void bch2_fs_chardev_exit(struct bch_fs *);
+int bch2_fs_chardev_init(struct bch_fs *);
+
+void bch2_chardev_exit(void);
+int __init bch2_chardev_init(void);
+
+#else
+
+static inline long bch2_fs_ioctl(struct bch_fs *c,
+ unsigned cmd, void __user * arg)
+{
+ return -ENOTTY;
+}
+
+static inline void bch2_fs_chardev_exit(struct bch_fs *c) {}
+static inline int bch2_fs_chardev_init(struct bch_fs *c) { return 0; }
+
+static inline void bch2_chardev_exit(void) {}
+static inline int __init bch2_chardev_init(void) { return 0; }
+
+#endif /* NO_BCACHEFS_FS */
+
+#endif /* _BCACHEFS_CHARDEV_H */
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
new file mode 100644
index 000000000000..3c761ad6b1c8
--- /dev/null
+++ b/fs/bcachefs/checksum.c
@@ -0,0 +1,804 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "checksum.h"
+#include "errcode.h"
+#include "super.h"
+#include "super-io.h"
+
+#include <linux/crc32c.h>
+#include <linux/crypto.h>
+#include <linux/xxhash.h>
+#include <linux/key.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/hash.h>
+#include <crypto/poly1305.h>
+#include <crypto/skcipher.h>
+#include <keys/user-type.h>
+
+/*
+ * bch2_checksum_state is an abstraction of the checksum state calculated over
+ * different pages. It allows data to be fed in page by page without the
+ * checksum algorithm losing its state. For native checksum algorithms (like
+ * crc), a default seed value will do; for hash-like algorithms, a state needs
+ * to be stored.
+ */
+
+struct bch2_checksum_state {
+ union {
+ u64 seed;
+ struct xxh64_state h64state;
+ };
+ unsigned int type;
+};
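+
+/*
+ * Illustrative use of the incremental interface (see bch2_checksum() below
+ * for a real caller):
+ *
+ *	struct bch2_checksum_state state = { .type = BCH_CSUM_crc32c };
+ *
+ *	bch2_checksum_init(&state);
+ *	bch2_checksum_update(&state, buf_a, len_a);
+ *	bch2_checksum_update(&state, buf_b, len_b);
+ *	csum = bch2_checksum_final(&state);
+ */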
+
+static void bch2_checksum_init(struct bch2_checksum_state *state)
+{
+ switch (state->type) {
+ case BCH_CSUM_none:
+ case BCH_CSUM_crc32c:
+ case BCH_CSUM_crc64:
+ state->seed = 0;
+ break;
+ case BCH_CSUM_crc32c_nonzero:
+ state->seed = U32_MAX;
+ break;
+ case BCH_CSUM_crc64_nonzero:
+ state->seed = U64_MAX;
+ break;
+ case BCH_CSUM_xxhash:
+ xxh64_reset(&state->h64state, 0);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static u64 bch2_checksum_final(const struct bch2_checksum_state *state)
+{
+ switch (state->type) {
+ case BCH_CSUM_none:
+ case BCH_CSUM_crc32c:
+ case BCH_CSUM_crc64:
+ return state->seed;
+ case BCH_CSUM_crc32c_nonzero:
+ return state->seed ^ U32_MAX;
+ case BCH_CSUM_crc64_nonzero:
+ return state->seed ^ U64_MAX;
+ case BCH_CSUM_xxhash:
+ return xxh64_digest(&state->h64state);
+ default:
+ BUG();
+ }
+}
+
+static void bch2_checksum_update(struct bch2_checksum_state *state, const void *data, size_t len)
+{
+ switch (state->type) {
+ case BCH_CSUM_none:
+ return;
+ case BCH_CSUM_crc32c_nonzero:
+ case BCH_CSUM_crc32c:
+ state->seed = crc32c(state->seed, data, len);
+ break;
+ case BCH_CSUM_crc64_nonzero:
+ case BCH_CSUM_crc64:
+ state->seed = crc64_be(state->seed, data, len);
+ break;
+ case BCH_CSUM_xxhash:
+ xxh64_update(&state->h64state, data, len);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
+ struct nonce nonce,
+ struct scatterlist *sg, size_t len)
+{
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ int ret;
+
+ skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
+
+ ret = crypto_skcipher_encrypt(req);
+ if (ret)
+ pr_err("got error %i from crypto_skcipher_encrypt()", ret);
+
+ return ret;
+}
+
+static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
+ struct nonce nonce,
+ void *buf, size_t len)
+{
+ if (!is_vmalloc_addr(buf)) {
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+		sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
+ return do_encrypt_sg(tfm, nonce, &sg, len);
+ } else {
+ unsigned pages = buf_pages(buf, len);
+ struct scatterlist *sg;
+ size_t orig_len = len;
+ int ret, i;
+
+ sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -BCH_ERR_ENOMEM_do_encrypt;
+
+ sg_init_table(sg, pages);
+
+ for (i = 0; i < pages; i++) {
+ unsigned offset = offset_in_page(buf);
+ unsigned pg_len = min_t(size_t, len, PAGE_SIZE - offset);
+
+ sg_set_page(sg + i, vmalloc_to_page(buf), pg_len, offset);
+ buf += pg_len;
+ len -= pg_len;
+ }
+
+ ret = do_encrypt_sg(tfm, nonce, sg, orig_len);
+ kfree(sg);
+ return ret;
+ }
+}
+
+int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
+ void *buf, size_t len)
+{
+ struct crypto_sync_skcipher *chacha20 =
+ crypto_alloc_sync_skcipher("chacha20", 0, 0);
+ int ret;
+
+ ret = PTR_ERR_OR_ZERO(chacha20);
+ if (ret) {
+ pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
+ return ret;
+ }
+
+ ret = crypto_skcipher_setkey(&chacha20->base,
+ (void *) key, sizeof(*key));
+ if (ret) {
+ pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
+ goto err;
+ }
+
+ ret = do_encrypt(chacha20, nonce, buf, len);
+err:
+ crypto_free_sync_skcipher(chacha20);
+ return ret;
+}
+
+static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
+ struct nonce nonce)
+{
+ u8 key[POLY1305_KEY_SIZE];
+ int ret;
+
+ nonce.d[3] ^= BCH_NONCE_POLY;
+
+ memset(key, 0, sizeof(key));
+ ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
+ if (ret)
+ return ret;
+
+ desc->tfm = c->poly1305;
+ crypto_shash_init(desc);
+ crypto_shash_update(desc, key, sizeof(key));
+ return 0;
+}
+
+struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
+ struct nonce nonce, const void *data, size_t len)
+{
+ switch (type) {
+ case BCH_CSUM_none:
+ case BCH_CSUM_crc32c_nonzero:
+ case BCH_CSUM_crc64_nonzero:
+ case BCH_CSUM_crc32c:
+ case BCH_CSUM_xxhash:
+ case BCH_CSUM_crc64: {
+ struct bch2_checksum_state state;
+
+ state.type = type;
+
+ bch2_checksum_init(&state);
+ bch2_checksum_update(&state, data, len);
+
+ return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
+ }
+
+ case BCH_CSUM_chacha20_poly1305_80:
+ case BCH_CSUM_chacha20_poly1305_128: {
+ SHASH_DESC_ON_STACK(desc, c->poly1305);
+ u8 digest[POLY1305_DIGEST_SIZE];
+ struct bch_csum ret = { 0 };
+
+ gen_poly_key(c, desc, nonce);
+
+ crypto_shash_update(desc, data, len);
+ crypto_shash_final(desc, digest);
+
+ memcpy(&ret, digest, bch_crc_bytes[type]);
+ return ret;
+ }
+ default:
+ BUG();
+ }
+}
+
+int bch2_encrypt(struct bch_fs *c, unsigned type,
+ struct nonce nonce, void *data, size_t len)
+{
+ if (!bch2_csum_type_is_encryption(type))
+ return 0;
+
+ return do_encrypt(c->chacha20, nonce, data, len);
+}
+
+static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio,
+ struct bvec_iter *iter)
+{
+ struct bio_vec bv;
+
+ switch (type) {
+ case BCH_CSUM_none:
+ return (struct bch_csum) { 0 };
+ case BCH_CSUM_crc32c_nonzero:
+ case BCH_CSUM_crc64_nonzero:
+ case BCH_CSUM_crc32c:
+ case BCH_CSUM_xxhash:
+ case BCH_CSUM_crc64: {
+ struct bch2_checksum_state state;
+
+ state.type = type;
+ bch2_checksum_init(&state);
+
+#ifdef CONFIG_HIGHMEM
+ __bio_for_each_segment(bv, bio, *iter, *iter) {
+ void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
+
+ bch2_checksum_update(&state, p, bv.bv_len);
+ kunmap_local(p);
+ }
+#else
+ __bio_for_each_bvec(bv, bio, *iter, *iter)
+ bch2_checksum_update(&state, page_address(bv.bv_page) + bv.bv_offset,
+ bv.bv_len);
+#endif
+ return (struct bch_csum) { .lo = cpu_to_le64(bch2_checksum_final(&state)) };
+ }
+
+ case BCH_CSUM_chacha20_poly1305_80:
+ case BCH_CSUM_chacha20_poly1305_128: {
+ SHASH_DESC_ON_STACK(desc, c->poly1305);
+ u8 digest[POLY1305_DIGEST_SIZE];
+ struct bch_csum ret = { 0 };
+
+ gen_poly_key(c, desc, nonce);
+
+#ifdef CONFIG_HIGHMEM
+ __bio_for_each_segment(bv, bio, *iter, *iter) {
+ void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
+
+ crypto_shash_update(desc, p, bv.bv_len);
+ kunmap_local(p);
+ }
+#else
+ __bio_for_each_bvec(bv, bio, *iter, *iter)
+ crypto_shash_update(desc,
+ page_address(bv.bv_page) + bv.bv_offset,
+ bv.bv_len);
+#endif
+ crypto_shash_final(desc, digest);
+
+ memcpy(&ret, digest, bch_crc_bytes[type]);
+ return ret;
+ }
+ default:
+ BUG();
+ }
+}
+
+struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
+{
+ struct bvec_iter iter = bio->bi_iter;
+
+ return __bch2_checksum_bio(c, type, nonce, bio, &iter);
+}
+
+int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ struct scatterlist sgl[16], *sg = sgl;
+ size_t bytes = 0;
+ int ret = 0;
+
+ if (!bch2_csum_type_is_encryption(type))
+ return 0;
+
+ sg_init_table(sgl, ARRAY_SIZE(sgl));
+
+ bio_for_each_segment(bv, bio, iter) {
+ if (sg == sgl + ARRAY_SIZE(sgl)) {
+ sg_mark_end(sg - 1);
+
+ ret = do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+ if (ret)
+ return ret;
+
+ nonce = nonce_add(nonce, bytes);
+ bytes = 0;
+
+ sg_init_table(sgl, ARRAY_SIZE(sgl));
+ sg = sgl;
+ }
+
+ sg_set_page(sg++, bv.bv_page, bv.bv_len, bv.bv_offset);
+ bytes += bv.bv_len;
+ }
+
+ sg_mark_end(sg - 1);
+ return do_encrypt_sg(c->chacha20, nonce, sgl, bytes);
+}
+
+struct bch_csum bch2_checksum_merge(unsigned type, struct bch_csum a,
+ struct bch_csum b, size_t b_len)
+{
+ struct bch2_checksum_state state;
+
+ state.type = type;
+ bch2_checksum_init(&state);
+ state.seed = le64_to_cpu(a.lo);
+
+ BUG_ON(!bch2_checksum_mergeable(type));
+
+ while (b_len) {
+ unsigned page_len = min_t(unsigned, b_len, PAGE_SIZE);
+
+ bch2_checksum_update(&state,
+ page_address(ZERO_PAGE(0)), page_len);
+ b_len -= page_len;
+ }
+ a.lo = cpu_to_le64(bch2_checksum_final(&state));
+ a.lo ^= b.lo;
+ a.hi ^= b.hi;
+ return a;
+}
+
+int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
+ struct bversion version,
+ struct bch_extent_crc_unpacked crc_old,
+ struct bch_extent_crc_unpacked *crc_a,
+ struct bch_extent_crc_unpacked *crc_b,
+ unsigned len_a, unsigned len_b,
+ unsigned new_csum_type)
+{
+ struct bvec_iter iter = bio->bi_iter;
+ struct nonce nonce = extent_nonce(version, crc_old);
+ struct bch_csum merged = { 0 };
+ struct crc_split {
+ struct bch_extent_crc_unpacked *crc;
+ unsigned len;
+ unsigned csum_type;
+ struct bch_csum csum;
+ } splits[3] = {
+		{ crc_a, len_a, new_csum_type, { 0 } },
+ { crc_b, len_b, new_csum_type, { 0 } },
+ { NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },
+ }, *i;
+ bool mergeable = crc_old.csum_type == new_csum_type &&
+ bch2_checksum_mergeable(new_csum_type);
+ unsigned crc_nonce = crc_old.nonce;
+
+ BUG_ON(len_a + len_b > bio_sectors(bio));
+ BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));
+ BUG_ON(crc_is_compressed(crc_old));
+ BUG_ON(bch2_csum_type_is_encryption(crc_old.csum_type) !=
+ bch2_csum_type_is_encryption(new_csum_type));
+
+ for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
+ iter.bi_size = i->len << 9;
+ if (mergeable || i->crc)
+ i->csum = __bch2_checksum_bio(c, i->csum_type,
+ nonce, bio, &iter);
+ else
+ bio_advance_iter(bio, &iter, i->len << 9);
+ nonce = nonce_add(nonce, i->len << 9);
+ }
+
+ if (mergeable)
+ for (i = splits; i < splits + ARRAY_SIZE(splits); i++)
+ merged = bch2_checksum_merge(new_csum_type, merged,
+ i->csum, i->len << 9);
+ else
+ merged = bch2_checksum_bio(c, crc_old.csum_type,
+ extent_nonce(version, crc_old), bio);
+
+ if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
+ bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
+ "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
+ __func__,
+ crc_old.csum.hi,
+ crc_old.csum.lo,
+ merged.hi,
+ merged.lo,
+ bch2_csum_types[crc_old.csum_type],
+ bch2_csum_types[new_csum_type]);
+ return -EIO;
+ }
+
+ for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
+ if (i->crc)
+ *i->crc = (struct bch_extent_crc_unpacked) {
+ .csum_type = i->csum_type,
+ .compression_type = crc_old.compression_type,
+ .compressed_size = i->len,
+ .uncompressed_size = i->len,
+ .offset = 0,
+ .live_size = i->len,
+ .nonce = crc_nonce,
+ .csum = i->csum,
+ };
+
+ if (bch2_csum_type_is_encryption(new_csum_type))
+ crc_nonce += i->len;
+ }
+
+ return 0;
+}
+
+/* BCH_SB_FIELD_crypt: */
+
+static int bch2_sb_crypt_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
+
+ if (vstruct_bytes(&crypt->field) < sizeof(*crypt)) {
+ prt_printf(err, "wrong size (got %zu should be %zu)",
+ vstruct_bytes(&crypt->field), sizeof(*crypt));
+ return -BCH_ERR_invalid_sb_crypt;
+ }
+
+ if (BCH_CRYPT_KDF_TYPE(crypt)) {
+ prt_printf(err, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt));
+ return -BCH_ERR_invalid_sb_crypt;
+ }
+
+ return 0;
+}
+
+static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
+
+	prt_printf(out, "KDF:               %llu", BCH_CRYPT_KDF_TYPE(crypt));
+ prt_newline(out);
+ prt_printf(out, "scrypt n: %llu", BCH_KDF_SCRYPT_N(crypt));
+ prt_newline(out);
+ prt_printf(out, "scrypt r: %llu", BCH_KDF_SCRYPT_R(crypt));
+ prt_newline(out);
+ prt_printf(out, "scrypt p: %llu", BCH_KDF_SCRYPT_P(crypt));
+ prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
+ .validate = bch2_sb_crypt_validate,
+ .to_text = bch2_sb_crypt_to_text,
+};
+
+#ifdef __KERNEL__
+static int __bch2_request_key(char *key_description, struct bch_key *key)
+{
+ struct key *keyring_key;
+ const struct user_key_payload *ukp;
+ int ret;
+
+ keyring_key = request_key(&key_type_user, key_description, NULL);
+ if (IS_ERR(keyring_key))
+ return PTR_ERR(keyring_key);
+
+ down_read(&keyring_key->sem);
+ ukp = dereference_key_locked(keyring_key);
+ if (ukp->datalen == sizeof(*key)) {
+ memcpy(key, ukp->data, ukp->datalen);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ up_read(&keyring_key->sem);
+ key_put(keyring_key);
+
+ return ret;
+}
+#else
+#include <keyutils.h>
+
+static int __bch2_request_key(char *key_description, struct bch_key *key)
+{
+ key_serial_t key_id;
+
+ key_id = request_key("user", key_description, NULL,
+ KEY_SPEC_SESSION_KEYRING);
+ if (key_id >= 0)
+ goto got_key;
+
+ key_id = request_key("user", key_description, NULL,
+ KEY_SPEC_USER_KEYRING);
+ if (key_id >= 0)
+ goto got_key;
+
+ key_id = request_key("user", key_description, NULL,
+ KEY_SPEC_USER_SESSION_KEYRING);
+ if (key_id >= 0)
+ goto got_key;
+
+ return -errno;
+got_key:
+
+ if (keyctl_read(key_id, (void *) key, sizeof(*key)) != sizeof(*key))
+ return -1;
+
+ return 0;
+}
+
+#include "../crypto.h"
+#endif
+
+int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
+{
+ struct printbuf key_description = PRINTBUF;
+ int ret;
+
+ prt_printf(&key_description, "bcachefs:");
+ pr_uuid(&key_description, sb->user_uuid.b);
+
+ ret = __bch2_request_key(key_description.buf, key);
+ printbuf_exit(&key_description);
+
+#ifndef __KERNEL__
+ if (ret) {
+ char *passphrase = read_passphrase("Enter passphrase: ");
+ struct bch_encrypted_key sb_key;
+
+ bch2_passphrase_check(sb, passphrase,
+ key, &sb_key);
+ ret = 0;
+ }
+#endif
+
+ /* stash with memfd, pass memfd fd to mount */
+
+ return ret;
+}
+
+#ifndef __KERNEL__
+int bch2_revoke_key(struct bch_sb *sb)
+{
+ key_serial_t key_id;
+ struct printbuf key_description = PRINTBUF;
+
+ prt_printf(&key_description, "bcachefs:");
+ pr_uuid(&key_description, sb->user_uuid.b);
+
+ key_id = request_key("user", key_description.buf, NULL, KEY_SPEC_USER_KEYRING);
+ printbuf_exit(&key_description);
+ if (key_id < 0)
+ return errno;
+
+ keyctl_revoke(key_id);
+
+ return 0;
+}
+#endif
+
+int bch2_decrypt_sb_key(struct bch_fs *c,
+ struct bch_sb_field_crypt *crypt,
+ struct bch_key *key)
+{
+ struct bch_encrypted_key sb_key = crypt->key;
+ struct bch_key user_key;
+ int ret = 0;
+
+ /* is key encrypted? */
+ if (!bch2_key_is_encrypted(&sb_key))
+ goto out;
+
+ ret = bch2_request_key(c->disk_sb.sb, &user_key);
+ if (ret) {
+ bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
+ goto err;
+ }
+
+ /* decrypt real key: */
+ ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
+ &sb_key, sizeof(sb_key));
+ if (ret)
+ goto err;
+
+ if (bch2_key_is_encrypted(&sb_key)) {
+ bch_err(c, "incorrect encryption key");
+ ret = -EINVAL;
+ goto err;
+ }
+out:
+ *key = sb_key.key;
+err:
+ memzero_explicit(&sb_key, sizeof(sb_key));
+ memzero_explicit(&user_key, sizeof(user_key));
+ return ret;
+}
+
+static int bch2_alloc_ciphers(struct bch_fs *c)
+{
+ int ret;
+
+ if (!c->chacha20)
+ c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
+ ret = PTR_ERR_OR_ZERO(c->chacha20);
+
+ if (ret) {
+ bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
+ return ret;
+ }
+
+ if (!c->poly1305)
+ c->poly1305 = crypto_alloc_shash("poly1305", 0, 0);
+ ret = PTR_ERR_OR_ZERO(c->poly1305);
+
+ if (ret) {
+ bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+int bch2_disable_encryption(struct bch_fs *c)
+{
+ struct bch_sb_field_crypt *crypt;
+ struct bch_key key;
+ int ret = -EINVAL;
+
+ mutex_lock(&c->sb_lock);
+
+ crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
+ if (!crypt)
+ goto out;
+
+ /* is key encrypted? */
+ ret = 0;
+ if (bch2_key_is_encrypted(&crypt->key))
+ goto out;
+
+ ret = bch2_decrypt_sb_key(c, crypt, &key);
+ if (ret)
+ goto out;
+
+ crypt->key.magic = cpu_to_le64(BCH_KEY_MAGIC);
+ crypt->key.key = key;
+
+ SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 0);
+ bch2_write_super(c);
+out:
+ mutex_unlock(&c->sb_lock);
+
+ return ret;
+}
+
+int bch2_enable_encryption(struct bch_fs *c, bool keyed)
+{
+ struct bch_encrypted_key key;
+ struct bch_key user_key;
+ struct bch_sb_field_crypt *crypt;
+ int ret = -EINVAL;
+
+ mutex_lock(&c->sb_lock);
+
+ /* Do we already have an encryption key? */
+ if (bch2_sb_field_get(c->disk_sb.sb, crypt))
+ goto err;
+
+ ret = bch2_alloc_ciphers(c);
+ if (ret)
+ goto err;
+
+ key.magic = cpu_to_le64(BCH_KEY_MAGIC);
+ get_random_bytes(&key.key, sizeof(key.key));
+
+ if (keyed) {
+ ret = bch2_request_key(c->disk_sb.sb, &user_key);
+ if (ret) {
+ bch_err(c, "error requesting encryption key: %s", bch2_err_str(ret));
+ goto err;
+ }
+
+ ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
+ &key, sizeof(key));
+ if (ret)
+ goto err;
+ }
+
+ ret = crypto_skcipher_setkey(&c->chacha20->base,
+ (void *) &key.key, sizeof(key.key));
+ if (ret)
+ goto err;
+
+ crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
+ sizeof(*crypt) / sizeof(u64));
+ if (!crypt) {
+ ret = -BCH_ERR_ENOSPC_sb_crypt;
+ goto err;
+ }
+
+ crypt->key = key;
+
+ /* write superblock */
+ SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb.sb, 1);
+ bch2_write_super(c);
+err:
+ mutex_unlock(&c->sb_lock);
+ memzero_explicit(&user_key, sizeof(user_key));
+ memzero_explicit(&key, sizeof(key));
+ return ret;
+}
+
+void bch2_fs_encryption_exit(struct bch_fs *c)
+{
+ if (!IS_ERR_OR_NULL(c->poly1305))
+ crypto_free_shash(c->poly1305);
+ if (!IS_ERR_OR_NULL(c->chacha20))
+ crypto_free_sync_skcipher(c->chacha20);
+ if (!IS_ERR_OR_NULL(c->sha256))
+ crypto_free_shash(c->sha256);
+}
+
+int bch2_fs_encryption_init(struct bch_fs *c)
+{
+ struct bch_sb_field_crypt *crypt;
+ struct bch_key key;
+ int ret = 0;
+
+ c->sha256 = crypto_alloc_shash("sha256", 0, 0);
+ ret = PTR_ERR_OR_ZERO(c->sha256);
+ if (ret) {
+ bch_err(c, "error requesting sha256 module: %s", bch2_err_str(ret));
+ goto out;
+ }
+
+ crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
+ if (!crypt)
+ goto out;
+
+ ret = bch2_alloc_ciphers(c);
+ if (ret)
+ goto out;
+
+ ret = bch2_decrypt_sb_key(c, crypt, &key);
+ if (ret)
+ goto out;
+
+ ret = crypto_skcipher_setkey(&c->chacha20->base,
+ (void *) &key.key, sizeof(key.key));
+ if (ret)
+ goto out;
+out:
+ memzero_explicit(&key, sizeof(key));
+ return ret;
+}
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
new file mode 100644
index 000000000000..13998388c545
--- /dev/null
+++ b/fs/bcachefs/checksum.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_CHECKSUM_H
+#define _BCACHEFS_CHECKSUM_H
+
+#include "bcachefs.h"
+#include "extents_types.h"
+#include "super-io.h"
+
+#include <linux/crc64.h>
+#include <crypto/chacha.h>
+
+static inline bool bch2_checksum_mergeable(unsigned type)
+{
+ switch (type) {
+ case BCH_CSUM_none:
+ case BCH_CSUM_crc32c:
+ case BCH_CSUM_crc64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct bch_csum bch2_checksum_merge(unsigned, struct bch_csum,
+ struct bch_csum, size_t);
+
+#define BCH_NONCE_EXTENT cpu_to_le32(1 << 28)
+#define BCH_NONCE_BTREE cpu_to_le32(2 << 28)
+#define BCH_NONCE_JOURNAL cpu_to_le32(3 << 28)
+#define BCH_NONCE_PRIO cpu_to_le32(4 << 28)
+#define BCH_NONCE_POLY cpu_to_le32(1 << 31)
+
+struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
+ const void *, size_t);
+
+/*
+ * This is used for various on-disk data structures - bch_sb, prio_set, bset,
+ * jset: the checksum is _always_ the first field of these structs
+ */
+#define csum_vstruct(_c, _type, _nonce, _i) \
+({ \
+ const void *_start = ((const void *) (_i)) + sizeof((_i)->csum);\
+ \
+ bch2_checksum(_c, _type, _nonce, _start, vstruct_end(_i) - _start);\
+})
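+
+/*
+ * E.g. for a struct jset *j, csum_vstruct(c, type, nonce, j) checksums
+ * everything from just past j->csum up to vstruct_end(j), so the stored
+ * checksum never covers itself.
+ */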
+
+int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
+int bch2_request_key(struct bch_sb *, struct bch_key *);
+#ifndef __KERNEL__
+int bch2_revoke_key(struct bch_sb *);
+#endif
+
+int bch2_encrypt(struct bch_fs *, unsigned, struct nonce,
+ void *data, size_t);
+
+struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned,
+ struct nonce, struct bio *);
+
+int bch2_rechecksum_bio(struct bch_fs *, struct bio *, struct bversion,
+ struct bch_extent_crc_unpacked,
+ struct bch_extent_crc_unpacked *,
+ struct bch_extent_crc_unpacked *,
+ unsigned, unsigned, unsigned);
+
+int __bch2_encrypt_bio(struct bch_fs *, unsigned,
+ struct nonce, struct bio *);
+
+static inline int bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
+{
+ return bch2_csum_type_is_encryption(type)
+ ? __bch2_encrypt_bio(c, type, nonce, bio)
+ : 0;
+}
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_crypt;
+
+int bch2_decrypt_sb_key(struct bch_fs *, struct bch_sb_field_crypt *,
+ struct bch_key *);
+
+int bch2_disable_encryption(struct bch_fs *);
+int bch2_enable_encryption(struct bch_fs *, bool);
+
+void bch2_fs_encryption_exit(struct bch_fs *);
+int bch2_fs_encryption_init(struct bch_fs *);
+
+static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
+ bool data)
+{
+ switch (type) {
+ case BCH_CSUM_OPT_none:
+ return BCH_CSUM_none;
+ case BCH_CSUM_OPT_crc32c:
+ return data ? BCH_CSUM_crc32c : BCH_CSUM_crc32c_nonzero;
+ case BCH_CSUM_OPT_crc64:
+ return data ? BCH_CSUM_crc64 : BCH_CSUM_crc64_nonzero;
+ case BCH_CSUM_OPT_xxhash:
+ return BCH_CSUM_xxhash;
+ default:
+ BUG();
+ }
+}
+
+static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
+ struct bch_io_opts opts)
+{
+ if (opts.nocow)
+ return 0;
+
+ if (c->sb.encryption_type)
+ return c->opts.wide_macs
+ ? BCH_CSUM_chacha20_poly1305_128
+ : BCH_CSUM_chacha20_poly1305_80;
+
+ return bch2_csum_opt_to_type(opts.data_checksum, true);
+}
+
+static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
+{
+ if (c->sb.encryption_type)
+ return BCH_CSUM_chacha20_poly1305_128;
+
+ return bch2_csum_opt_to_type(c->opts.metadata_checksum, false);
+}
+
+static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
+ unsigned type)
+{
+ if (type >= BCH_CSUM_NR)
+ return false;
+
+ if (bch2_csum_type_is_encryption(type) && !c->chacha20)
+ return false;
+
+ return true;
+}
+
+/* returns true if not equal */
+static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
+{
+ /*
+ * XXX: need some way of preventing the compiler from optimizing this
+ * into a form that isn't constant time..
+ */
+ return ((l.lo ^ r.lo) | (l.hi ^ r.hi)) != 0;
+}
+
+/* for skipping ahead and encrypting/decrypting at an offset: */
+static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
+{
+ EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));
+
+ le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
+ return nonce;
+}
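+
+/*
+ * Example: encrypting at byte offset 128 within an extent advances the
+ * little-endian counter word d[0] by 128 / CHACHA_BLOCK_SIZE == 2, matching
+ * the block count chacha20 itself would have reached at that offset.
+ */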
+
+static inline struct nonce null_nonce(void)
+{
+ struct nonce ret;
+
+ memset(&ret, 0, sizeof(ret));
+ return ret;
+}
+
+static inline struct nonce extent_nonce(struct bversion version,
+ struct bch_extent_crc_unpacked crc)
+{
+ unsigned compression_type = crc_is_compressed(crc)
+ ? crc.compression_type
+ : 0;
+ unsigned size = compression_type ? crc.uncompressed_size : 0;
+ struct nonce nonce = (struct nonce) {{
+ [0] = cpu_to_le32(size << 22),
+ [1] = cpu_to_le32(version.lo),
+ [2] = cpu_to_le32(version.lo >> 32),
+ [3] = cpu_to_le32(version.hi |
+ (compression_type << 24)) ^ BCH_NONCE_EXTENT,
+ }};
+
+ return nonce_add(nonce, crc.nonce << 9);
+}
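+
+/*
+ * Nonce layout, summarizing the code above: word 0 carries the uncompressed
+ * size (compressed extents only) in its top bits, words 1-2 carry the key
+ * version, and word 3 carries version.hi plus the compression type, XORed
+ * with the BCH_NONCE_EXTENT tag; crc.nonce then offsets the whole thing in
+ * 512-byte units via nonce_add().
+ */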
+
+static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
+{
+ return le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
+}
+
+static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb)
+{
+ __le64 magic = __bch2_sb_magic(sb);
+
+ return (struct nonce) {{
+ [0] = 0,
+ [1] = 0,
+ [2] = ((__le32 *) &magic)[0],
+ [3] = ((__le32 *) &magic)[1],
+ }};
+}
+
+static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c)
+{
+ __le64 magic = bch2_sb_magic(c);
+
+ return (struct nonce) {{
+ [0] = 0,
+ [1] = 0,
+ [2] = ((__le32 *) &magic)[0],
+ [3] = ((__le32 *) &magic)[1],
+ }};
+}
+
+#endif /* _BCACHEFS_CHECKSUM_H */
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
new file mode 100644
index 000000000000..f41889093a2c
--- /dev/null
+++ b/fs/bcachefs/clock.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "clock.h"
+
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/preempt.h>
+
+static inline long io_timer_cmp(io_timer_heap *h,
+ struct io_timer *l,
+ struct io_timer *r)
+{
+ return l->expire - r->expire;
+}
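+
+/*
+ * The signed difference gives a wraparound-safe "earlier than" ordering for
+ * the timer heap, in the same spirit as time_after() - sound as long as two
+ * live timers are never more than LONG_MAX sectors apart.
+ */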
+
+void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
+{
+ size_t i;
+
+ spin_lock(&clock->timer_lock);
+
+ if (time_after_eq((unsigned long) atomic64_read(&clock->now),
+ timer->expire)) {
+ spin_unlock(&clock->timer_lock);
+ timer->fn(timer);
+ return;
+ }
+
+ for (i = 0; i < clock->timers.used; i++)
+ if (clock->timers.data[i] == timer)
+ goto out;
+
+ BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
+out:
+ spin_unlock(&clock->timer_lock);
+}
+
+void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
+{
+ size_t i;
+
+ spin_lock(&clock->timer_lock);
+
+ for (i = 0; i < clock->timers.used; i++)
+ if (clock->timers.data[i] == timer) {
+ heap_del(&clock->timers, i, io_timer_cmp, NULL);
+ break;
+ }
+
+ spin_unlock(&clock->timer_lock);
+}
+
+struct io_clock_wait {
+ struct io_timer io_timer;
+ struct timer_list cpu_timer;
+ struct task_struct *task;
+ int expired;
+};
+
+static void io_clock_wait_fn(struct io_timer *timer)
+{
+ struct io_clock_wait *wait = container_of(timer,
+ struct io_clock_wait, io_timer);
+
+ wait->expired = 1;
+ wake_up_process(wait->task);
+}
+
+static void io_clock_cpu_timeout(struct timer_list *timer)
+{
+ struct io_clock_wait *wait = container_of(timer,
+ struct io_clock_wait, cpu_timer);
+
+ wait->expired = 1;
+ wake_up_process(wait->task);
+}
+
+void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
+{
+ struct io_clock_wait wait;
+
+ /* XXX: calculate sleep time rigorously */
+ wait.io_timer.expire = until;
+ wait.io_timer.fn = io_clock_wait_fn;
+ wait.task = current;
+ wait.expired = 0;
+ bch2_io_timer_add(clock, &wait.io_timer);
+
+ schedule();
+
+ bch2_io_timer_del(clock, &wait.io_timer);
+}
+
+void bch2_kthread_io_clock_wait(struct io_clock *clock,
+ unsigned long io_until,
+ unsigned long cpu_timeout)
+{
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
+ struct io_clock_wait wait;
+
+ wait.io_timer.expire = io_until;
+ wait.io_timer.fn = io_clock_wait_fn;
+ wait.task = current;
+ wait.expired = 0;
+ bch2_io_timer_add(clock, &wait.io_timer);
+
+ timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);
+
+ if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
+ mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread && kthread_should_stop())
+ break;
+
+ if (wait.expired)
+ break;
+
+ schedule();
+ try_to_freeze();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ del_timer_sync(&wait.cpu_timer);
+ destroy_timer_on_stack(&wait.cpu_timer);
+ bch2_io_timer_del(clock, &wait.io_timer);
+}
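+
+/*
+ * The wait above races two clocks: the IO clock timer fires once enough
+ * sectors of IO have been issued, while the CPU timer bounds the sleep in
+ * wall-clock jiffies; whichever expires first sets wait.expired and wakes
+ * the task, and both timers are torn down before returning.
+ */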
+
+static struct io_timer *get_expired_timer(struct io_clock *clock,
+ unsigned long now)
+{
+ struct io_timer *ret = NULL;
+
+ spin_lock(&clock->timer_lock);
+
+ if (clock->timers.used &&
+ time_after_eq(now, clock->timers.data[0]->expire))
+ heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
+
+ spin_unlock(&clock->timer_lock);
+
+ return ret;
+}
+
+void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
+{
+ struct io_timer *timer;
+ unsigned long now = atomic64_add_return(sectors, &clock->now);
+
+ while ((timer = get_expired_timer(clock, now)))
+ timer->fn(timer);
+}
+
+void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
+{
+ unsigned long now;
+ unsigned i;
+
+ out->atomic++;
+ spin_lock(&clock->timer_lock);
+ now = atomic64_read(&clock->now);
+
+ for (i = 0; i < clock->timers.used; i++)
+ prt_printf(out, "%ps:\t%li\n",
+ clock->timers.data[i]->fn,
+ clock->timers.data[i]->expire - now);
+ spin_unlock(&clock->timer_lock);
+ --out->atomic;
+}
+
+void bch2_io_clock_exit(struct io_clock *clock)
+{
+ free_heap(&clock->timers);
+ free_percpu(clock->pcpu_buf);
+}
+
+int bch2_io_clock_init(struct io_clock *clock)
+{
+ atomic64_set(&clock->now, 0);
+ spin_lock_init(&clock->timer_lock);
+
+ clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();
+
+ clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
+ if (!clock->pcpu_buf)
+ return -BCH_ERR_ENOMEM_io_clock_init;
+
+ if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
+ return -BCH_ERR_ENOMEM_io_clock_init;
+
+ return 0;
+}
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
new file mode 100644
index 000000000000..70a0f7436c84
--- /dev/null
+++ b/fs/bcachefs/clock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_CLOCK_H
+#define _BCACHEFS_CLOCK_H
+
+void bch2_io_timer_add(struct io_clock *, struct io_timer *);
+void bch2_io_timer_del(struct io_clock *, struct io_timer *);
+void bch2_kthread_io_clock_wait(struct io_clock *, unsigned long,
+ unsigned long);
+
+void __bch2_increment_clock(struct io_clock *, unsigned);
+
+static inline void bch2_increment_clock(struct bch_fs *c, unsigned sectors,
+ int rw)
+{
+ struct io_clock *clock = &c->io_clock[rw];
+
+ if (unlikely(this_cpu_add_return(*clock->pcpu_buf, sectors) >=
+ IO_CLOCK_PCPU_SECTORS))
+ __bch2_increment_clock(clock, this_cpu_xchg(*clock->pcpu_buf, 0));
+}
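+
+/*
+ * Example (illustrative): with IO_CLOCK_PCPU_SECTORS == 128, a CPU issuing
+ * 8-sector writes only touches the shared atomic on every 16th call; the
+ * xchg then publishes the whole batched count at once, so sectors are never
+ * lost - the clock just lags by at most max_slop sectors across all CPUs.
+ */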
+
+void bch2_io_clock_schedule_timeout(struct io_clock *, unsigned long);
+
+#define bch2_kthread_wait_event_ioclock_timeout(wq, condition, clock, timeout)\
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
+void bch2_io_timers_to_text(struct printbuf *, struct io_clock *);
+
+void bch2_io_clock_exit(struct io_clock *);
+int bch2_io_clock_init(struct io_clock *);
+
+#endif /* _BCACHEFS_CLOCK_H */
diff --git a/fs/bcachefs/clock_types.h b/fs/bcachefs/clock_types.h
new file mode 100644
index 000000000000..5fae0012d808
--- /dev/null
+++ b/fs/bcachefs/clock_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_CLOCK_TYPES_H
+#define _BCACHEFS_CLOCK_TYPES_H
+
+#include "util.h"
+
+#define NR_IO_TIMERS (BCH_SB_MEMBERS_MAX * 3)
+
+/*
+ * Clocks/timers in units of sectors of IO:
+ *
+ * Note - they use percpu batching, so they're only approximate.
+ */
+
+struct io_timer;
+typedef void (*io_timer_fn)(struct io_timer *);
+
+struct io_timer {
+ io_timer_fn fn;
+ unsigned long expire;
+};
+
+/* Amount to buffer up on a percpu counter */
+#define IO_CLOCK_PCPU_SECTORS 128
+
+typedef HEAP(struct io_timer *) io_timer_heap;
+
+struct io_clock {
+ atomic64_t now;
+ u16 __percpu *pcpu_buf;
+ unsigned max_slop;
+
+ spinlock_t timer_lock;
+ io_timer_heap timers;
+};
+
+#endif /* _BCACHEFS_CLOCK_TYPES_H */
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
new file mode 100644
index 000000000000..1480b64547b0
--- /dev/null
+++ b/fs/bcachefs/compress.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "checksum.h"
+#include "compress.h"
+#include "extents.h"
+#include "super-io.h"
+
+#include <linux/lz4.h>
+#include <linux/zlib.h>
+#include <linux/zstd.h>
+
+/* Bounce buffer: */
+struct bbuf {
+ void *b;
+ enum {
+ BB_NONE,
+ BB_VMAP,
+ BB_KMALLOC,
+ BB_MEMPOOL,
+ } type;
+ int rw;
+};
+
+static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
+{
+ void *b;
+
+ BUG_ON(size > c->opts.encoded_extent_max);
+
+ b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
+ if (b)
+ return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };
+
+ b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
+ if (b)
+ return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };
+
+ BUG();
+}
+
+static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ void *expected_start = NULL;
+
+ __bio_for_each_bvec(bv, bio, iter, start) {
+ if (expected_start &&
+ expected_start != page_address(bv.bv_page) + bv.bv_offset)
+ return false;
+
+ expected_start = page_address(bv.bv_page) +
+ bv.bv_offset + bv.bv_len;
+ }
+
+ return true;
+}
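+
+/*
+ * True only when every bvec starts exactly where the previous one ended in
+ * the direct mapping, i.e. the range is one physically contiguous span
+ * addressable without vmap().
+ */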
+
+static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
+ struct bvec_iter start, int rw)
+{
+ struct bbuf ret;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+ unsigned nr_pages = 0;
+ struct page *stack_pages[16];
+ struct page **pages = NULL;
+ void *data;
+
+ BUG_ON(start.bi_size > c->opts.encoded_extent_max);
+
+ if (!PageHighMem(bio_iter_page(bio, start)) &&
+ bio_phys_contig(bio, start))
+ return (struct bbuf) {
+ .b = page_address(bio_iter_page(bio, start)) +
+ bio_iter_offset(bio, start),
+ .type = BB_NONE, .rw = rw
+ };
+
+ /* check if we can map the pages contiguously: */
+ __bio_for_each_segment(bv, bio, iter, start) {
+ if (iter.bi_size != start.bi_size &&
+ bv.bv_offset)
+ goto bounce;
+
+ if (bv.bv_len < iter.bi_size &&
+ bv.bv_offset + bv.bv_len < PAGE_SIZE)
+ goto bounce;
+
+ nr_pages++;
+ }
+
+ BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);
+
+ pages = nr_pages > ARRAY_SIZE(stack_pages)
+ ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
+ : stack_pages;
+ if (!pages)
+ goto bounce;
+
+ nr_pages = 0;
+ __bio_for_each_segment(bv, bio, iter, start)
+ pages[nr_pages++] = bv.bv_page;
+
+ data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (pages != stack_pages)
+ kfree(pages);
+
+ if (data)
+ return (struct bbuf) {
+ .b = data + bio_iter_offset(bio, start),
+ .type = BB_VMAP, .rw = rw
+ };
+bounce:
+ ret = __bounce_alloc(c, start.bi_size, rw);
+
+ if (rw == READ)
+ memcpy_from_bio(ret.b, bio, start);
+
+ return ret;
+}
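+
+/*
+ * Mapping strategy, in order of preference: address lowmem directly when the
+ * range is physically contiguous, vmap() the pages when they're whole but
+ * scattered, and otherwise bounce through an allocation - copying the bio
+ * contents in only for reads; for writes the caller copies back out.
+ */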
+
+static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
+{
+ return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
+}
+
+static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
+{
+ switch (buf.type) {
+ case BB_NONE:
+ break;
+ case BB_VMAP:
+ vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
+ break;
+ case BB_KMALLOC:
+ kfree(buf.b);
+ break;
+ case BB_MEMPOOL:
+ mempool_free(buf.b, &c->compression_bounce[buf.rw]);
+ break;
+ }
+}
+
+static inline void zlib_set_workspace(z_stream *strm, void *workspace)
+{
+#ifdef __KERNEL__
+ strm->workspace = workspace;
+#endif
+}
+
+static int __bio_uncompress(struct bch_fs *c, struct bio *src,
+ void *dst_data, struct bch_extent_crc_unpacked crc)
+{
+ struct bbuf src_data = { NULL };
+ size_t src_len = src->bi_iter.bi_size;
+ size_t dst_len = crc.uncompressed_size << 9;
+ void *workspace;
+ int ret;
+
+ src_data = bio_map_or_bounce(c, src, READ);
+
+ switch (crc.compression_type) {
+ case BCH_COMPRESSION_TYPE_lz4_old:
+ case BCH_COMPRESSION_TYPE_lz4:
+ ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
+ src_len, dst_len, dst_len);
+ if (ret != dst_len)
+ goto err;
+ break;
+ case BCH_COMPRESSION_TYPE_gzip: {
+ z_stream strm = {
+ .next_in = src_data.b,
+ .avail_in = src_len,
+ .next_out = dst_data,
+ .avail_out = dst_len,
+ };
+
+ workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
+
+ zlib_set_workspace(&strm, workspace);
+ zlib_inflateInit2(&strm, -MAX_WBITS);
+ ret = zlib_inflate(&strm, Z_FINISH);
+
+ mempool_free(workspace, &c->decompress_workspace);
+
+ if (ret != Z_STREAM_END)
+ goto err;
+ break;
+ }
+ case BCH_COMPRESSION_TYPE_zstd: {
+ ZSTD_DCtx *ctx;
+ size_t real_src_len = le32_to_cpup(src_data.b);
+
+ if (real_src_len > src_len - 4)
+ goto err;
+
+ workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
+ ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
+
+ ret = zstd_decompress_dctx(ctx,
+ dst_data, dst_len,
+ src_data.b + 4, real_src_len);
+
+ mempool_free(workspace, &c->decompress_workspace);
+
+ if (ret != dst_len)
+ goto err;
+ break;
+ }
+ default:
+ BUG();
+ }
+ ret = 0;
+out:
+ bio_unmap_or_unbounce(c, src_data);
+ return ret;
+err:
+ ret = -EIO;
+ goto out;
+}
+
+int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
+ struct bch_extent_crc_unpacked *crc)
+{
+ struct bbuf data = { NULL };
+ size_t dst_len = crc->uncompressed_size << 9;
+
+ /* bio must own its pages: */
+ BUG_ON(!bio->bi_vcnt);
+ BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);
+
+ if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
+ crc->compressed_size << 9 > c->opts.encoded_extent_max) {
+ bch_err(c, "error rewriting existing data: extent too big");
+ return -EIO;
+ }
+
+ data = __bounce_alloc(c, dst_len, WRITE);
+
+ if (__bio_uncompress(c, bio, data.b, *crc)) {
+ if (!c->opts.no_data_io)
+ bch_err(c, "error rewriting existing data: decompression error");
+ bio_unmap_or_unbounce(c, data);
+ return -EIO;
+ }
+
+ /*
+ * XXX: don't have a good way to assert that the bio was allocated with
+ * enough space, we depend on bch2_move_extent doing the right thing
+ */
+ bio->bi_iter.bi_size = crc->live_size << 9;
+
+ memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));
+
+ crc->csum_type = 0;
+ crc->compression_type = 0;
+ crc->compressed_size = crc->live_size;
+ crc->uncompressed_size = crc->live_size;
+ crc->offset = 0;
+ crc->csum = (struct bch_csum) { 0, 0 };
+
+ bio_unmap_or_unbounce(c, data);
+ return 0;
+}
+
+int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
+ struct bio *dst, struct bvec_iter dst_iter,
+ struct bch_extent_crc_unpacked crc)
+{
+ struct bbuf dst_data = { NULL };
+ size_t dst_len = crc.uncompressed_size << 9;
+ int ret;
+
+ if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
+ crc.compressed_size << 9 > c->opts.encoded_extent_max)
+ return -EIO;
+
+ dst_data = dst_len == dst_iter.bi_size
+ ? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
+ : __bounce_alloc(c, dst_len, WRITE);
+
+ ret = __bio_uncompress(c, src, dst_data.b, crc);
+ if (ret)
+ goto err;
+
+ if (dst_data.type != BB_NONE &&
+ dst_data.type != BB_VMAP)
+ memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
+err:
+ bio_unmap_or_unbounce(c, dst_data);
+ return ret;
+}
+
+static int attempt_compress(struct bch_fs *c,
+ void *workspace,
+ void *dst, size_t dst_len,
+ void *src, size_t src_len,
+ struct bch_compression_opt compression)
+{
+ enum bch_compression_type compression_type =
+ __bch2_compression_opt_to_type[compression.type];
+
+ switch (compression_type) {
+ case BCH_COMPRESSION_TYPE_lz4:
+ if (compression.level < LZ4HC_MIN_CLEVEL) {
+ int len = src_len;
+ int ret = LZ4_compress_destSize(
+ src, dst,
+ &len, dst_len,
+ workspace);
+ if (len < src_len)
+ return -len;
+
+ return ret;
+ } else {
+ int ret = LZ4_compress_HC(
+ src, dst,
+ src_len, dst_len,
+ compression.level,
+ workspace);
+
+ return ret ?: -1;
+ }
+ case BCH_COMPRESSION_TYPE_gzip: {
+ z_stream strm = {
+ .next_in = src,
+ .avail_in = src_len,
+ .next_out = dst,
+ .avail_out = dst_len,
+ };
+
+ zlib_set_workspace(&strm, workspace);
+ zlib_deflateInit2(&strm,
+ compression.level
+ ? clamp_t(unsigned, compression.level,
+ Z_BEST_SPEED, Z_BEST_COMPRESSION)
+ : Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
+ Z_DEFAULT_STRATEGY);
+
+ if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
+ return 0;
+
+ if (zlib_deflateEnd(&strm) != Z_OK)
+ return 0;
+
+ return strm.total_out;
+ }
+ case BCH_COMPRESSION_TYPE_zstd: {
+ /*
+ * rescale:
+ * zstd max compression level is 22, our max level is 15
+ */
+ unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
+ ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
+ ZSTD_CCtx *ctx = zstd_init_cctx(workspace,
+ zstd_cctx_workspace_bound(&params.cParams));
+
+ /*
+ * ZSTD requires that when we decompress we pass in the exact
+ * compressed size - rounding it up to the nearest sector
+ * doesn't work, so we use the first 4 bytes of the buffer for
+ * that.
+ *
+ * Additionally, the ZSTD code seems to have a bug where it will
+ * write just past the end of the buffer - so subtract a fudge
+ * factor (7 bytes) from the dst buffer size to account for
+ * that.
+ */
+ size_t len = zstd_compress_cctx(ctx,
+ dst + 4, dst_len - 4 - 7,
+ src, src_len,
+ &c->zstd_params);
+ if (zstd_is_error(len))
+ return 0;
+
+ *((__le32 *) dst) = cpu_to_le32(len);
+ return len + 4;
+ }
+ default:
+ BUG();
+ }
+}
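+
+/*
+ * Return convention, as consumed by __bio_compress() below: > 0 is the
+ * compressed size in bytes, 0 means the attempt failed outright, and < 0 is
+ * an LZ4-style hint that only -ret bytes of input would have fit in the
+ * destination buffer.
+ */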
+
+static unsigned __bio_compress(struct bch_fs *c,
+ struct bio *dst, size_t *dst_len,
+ struct bio *src, size_t *src_len,
+ struct bch_compression_opt compression)
+{
+ struct bbuf src_data = { NULL }, dst_data = { NULL };
+ void *workspace;
+ enum bch_compression_type compression_type =
+ __bch2_compression_opt_to_type[compression.type];
+ unsigned pad;
+ int ret = 0;
+
+ BUG_ON(compression_type >= BCH_COMPRESSION_TYPE_NR);
+ BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type]));
+
+ /* If it's only one block, don't bother trying to compress: */
+ if (src->bi_iter.bi_size <= c->opts.block_size)
+ return BCH_COMPRESSION_TYPE_incompressible;
+
+ dst_data = bio_map_or_bounce(c, dst, WRITE);
+ src_data = bio_map_or_bounce(c, src, READ);
+
+ workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS);
+
+ *src_len = src->bi_iter.bi_size;
+ *dst_len = dst->bi_iter.bi_size;
+
+ /*
+ * XXX: this algorithm sucks when the compression code doesn't tell us how
+ * much input would have fit, the way LZ4 does:
+ */
+ while (1) {
+ if (*src_len <= block_bytes(c)) {
+ ret = -1;
+ break;
+ }
+
+ ret = attempt_compress(c, workspace,
+ dst_data.b, *dst_len,
+ src_data.b, *src_len,
+ compression);
+ if (ret > 0) {
+ *dst_len = ret;
+ ret = 0;
+ break;
+ }
+
+ /* Didn't fit: should we retry with a smaller amount? */
+ if (*src_len <= *dst_len) {
+ ret = -1;
+ break;
+ }
+
+ /*
+ * If ret is negative, it's a hint as to how much data would fit
+ */
+ BUG_ON(-ret >= *src_len);
+
+ if (ret < 0)
+ *src_len = -ret;
+ else
+ *src_len -= (*src_len - *dst_len) / 2;
+ *src_len = round_down(*src_len, block_bytes(c));
+ }
+
+ mempool_free(workspace, &c->compress_workspace[compression_type]);
+
+ if (ret)
+ goto err;
+
+ /* Didn't get smaller: */
+ if (round_up(*dst_len, block_bytes(c)) >= *src_len)
+ goto err;
+
+ pad = round_up(*dst_len, block_bytes(c)) - *dst_len;
+
+ memset(dst_data.b + *dst_len, 0, pad);
+ *dst_len += pad;
+
+ if (dst_data.type != BB_NONE &&
+ dst_data.type != BB_VMAP)
+ memcpy_to_bio(dst, dst->bi_iter, dst_data.b);
+
+ BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
+ BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
+ BUG_ON(*dst_len & (block_bytes(c) - 1));
+ BUG_ON(*src_len & (block_bytes(c) - 1));
+ ret = compression_type;
+out:
+ bio_unmap_or_unbounce(c, src_data);
+ bio_unmap_or_unbounce(c, dst_data);
+ return ret;
+err:
+ ret = BCH_COMPRESSION_TYPE_incompressible;
+ goto out;
+}
+
+unsigned bch2_bio_compress(struct bch_fs *c,
+ struct bio *dst, size_t *dst_len,
+ struct bio *src, size_t *src_len,
+ unsigned compression_opt)
+{
+ unsigned orig_dst = dst->bi_iter.bi_size;
+ unsigned orig_src = src->bi_iter.bi_size;
+ unsigned compression_type;
+
+ /* Don't consume more than opts.encoded_extent_max from @src: */
+ src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
+ c->opts.encoded_extent_max);
+ /* Don't generate a bigger output than input: */
+ dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
+
+ compression_type =
+ __bio_compress(c, dst, dst_len, src, src_len,
+ bch2_compression_decode(compression_opt));
+
+ dst->bi_iter.bi_size = orig_dst;
+ src->bi_iter.bi_size = orig_src;
+ return compression_type;
+}
+
+static int __bch2_fs_compress_init(struct bch_fs *, u64);
+
+#define BCH_FEATURE_none 0
+
+static const unsigned bch2_compression_opt_to_feature[] = {
+#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
+ BCH_COMPRESSION_OPTS()
+#undef x
+};
+
+#undef BCH_FEATURE_none
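+
+/*
+ * BCH_FEATURE_none is not a real feature bit: it's defined as 0 around the
+ * x-macro expansion above solely so the BCH_COMPRESSION_OPT_none entry
+ * compiles, then #undef'd so the placeholder can't leak further.
+ */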
+
+static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
+{
+ int ret = 0;
+
+ if ((c->sb.features & f) == f)
+ return 0;
+
+ mutex_lock(&c->sb_lock);
+
+ if ((c->sb.features & f) == f) {
+ mutex_unlock(&c->sb_lock);
+ return 0;
+ }
+
+ ret = __bch2_fs_compress_init(c, c->sb.features|f);
+ if (ret) {
+ mutex_unlock(&c->sb_lock);
+ return ret;
+ }
+
+ c->disk_sb.sb->features[0] |= cpu_to_le64(f);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ return 0;
+}
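+
+/*
+ * Note the double-checked locking above: the feature bits are tested once
+ * locklessly for the fast path, then re-tested under sb_lock before the
+ * workspaces are allocated and the new feature bit is written back to the
+ * superblock.
+ */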
+
+int bch2_check_set_has_compressed_data(struct bch_fs *c,
+ unsigned compression_opt)
+{
+ unsigned compression_type = bch2_compression_decode(compression_opt).type;
+
+ BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));
+
+ return compression_type
+ ? __bch2_check_set_has_compressed_data(c,
+ 1ULL << bch2_compression_opt_to_feature[compression_type])
+ : 0;
+}
+
+void bch2_fs_compress_exit(struct bch_fs *c)
+{
+ unsigned i;
+
+ mempool_exit(&c->decompress_workspace);
+ for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
+ mempool_exit(&c->compress_workspace[i]);
+ mempool_exit(&c->compression_bounce[WRITE]);
+ mempool_exit(&c->compression_bounce[READ]);
+}
+
+static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
+{
+ size_t decompress_workspace_size = 0;
+ ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
+ c->opts.encoded_extent_max);
+ struct {
+ unsigned feature;
+ enum bch_compression_type type;
+ size_t compress_workspace;
+ size_t decompress_workspace;
+ } compression_types[] = {
+ { BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4,
+ max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS),
+ 0 },
+ { BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip,
+ zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
+ zlib_inflate_workspacesize(), },
+ { BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd,
+ zstd_cctx_workspace_bound(&params.cParams),
+ zstd_dctx_workspace_bound() },
+ }, *i;
+ bool have_compressed = false;
+
+ c->zstd_params = params;
+
+ for (i = compression_types;
+ i < compression_types + ARRAY_SIZE(compression_types);
+ i++)
+ have_compressed |= (features & BIT_ULL(i->feature)) != 0;
+
+ if (!have_compressed)
+ return 0;
+
+ if (!mempool_initialized(&c->compression_bounce[READ]) &&
+ mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+ 1, c->opts.encoded_extent_max))
+ return -BCH_ERR_ENOMEM_compression_bounce_read_init;
+
+ if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
+ mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+ 1, c->opts.encoded_extent_max))
+ return -BCH_ERR_ENOMEM_compression_bounce_write_init;
+
+ for (i = compression_types;
+ i < compression_types + ARRAY_SIZE(compression_types);
+ i++) {
+ decompress_workspace_size =
+ max(decompress_workspace_size, i->decompress_workspace);
+
+ if (!(features & BIT_ULL(i->feature)))
+ continue;
+
+ if (mempool_initialized(&c->compress_workspace[i->type]))
+ continue;
+
+ if (mempool_init_kvpmalloc_pool(
+ &c->compress_workspace[i->type],
+ 1, i->compress_workspace))
+ return -BCH_ERR_ENOMEM_compression_workspace_init;
+ }
+
+ if (!mempool_initialized(&c->decompress_workspace) &&
+ mempool_init_kvpmalloc_pool(&c->decompress_workspace,
+ 1, decompress_workspace_size))
+ return -BCH_ERR_ENOMEM_decompression_workspace_init;
+
+ return 0;
+}
+
+static u64 compression_opt_to_feature(unsigned v)
+{
+ unsigned type = bch2_compression_decode(v).type;
+
+ return BIT_ULL(bch2_compression_opt_to_feature[type]);
+}
+
+int bch2_fs_compress_init(struct bch_fs *c)
+{
+ u64 f = c->sb.features;
+
+ f |= compression_opt_to_feature(c->opts.compression);
+ f |= compression_opt_to_feature(c->opts.background_compression);
+
+ return __bch2_fs_compress_init(c, f);
+}
+
+int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
+ struct printbuf *err)
+{
+ char *val = kstrdup(_val, GFP_KERNEL);
+ char *p = val, *type_str, *level_str;
+ struct bch_compression_opt opt = { 0 };
+ int ret;
+
+ if (!val)
+ return -ENOMEM;
+
+ type_str = strsep(&p, ":");
+ level_str = p;
+
+ ret = match_string(bch2_compression_opts, -1, type_str);
+ if (ret < 0 && err)
+ prt_str(err, "invalid compression type");
+ if (ret < 0)
+ goto err;
+
+ opt.type = ret;
+
+ if (level_str) {
+ unsigned level;
+
+ ret = kstrtouint(level_str, 10, &level);
+ if (!ret && !opt.type && level)
+ ret = -EINVAL;
+ if (!ret && level > 15)
+ ret = -EINVAL;
+ if (ret < 0 && err)
+ prt_str(err, "invalid compression level");
+ if (ret < 0)
+ goto err;
+
+ opt.level = level;
+ }
+
+ *res = bch2_compression_encode(opt);
+err:
+ kfree(val);
+ return ret;
+}
+
+void bch2_opt_compression_to_text(struct printbuf *out,
+ struct bch_fs *c,
+ struct bch_sb *sb,
+ u64 v)
+{
+ struct bch_compression_opt opt = bch2_compression_decode(v);
+
+ prt_str(out, bch2_compression_opts[opt.type]);
+ if (opt.level)
+ prt_printf(out, ":%u", opt.level);
+}
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
new file mode 100644
index 000000000000..052ea303241f
--- /dev/null
+++ b/fs/bcachefs/compress.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_COMPRESS_H
+#define _BCACHEFS_COMPRESS_H
+
+#include "extents_types.h"
+
+struct bch_compression_opt {
+ u8 type:4,
+ level:4;
+};
+
+static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
+{
+ return (struct bch_compression_opt) {
+ .type = v & 15,
+ .level = v >> 4,
+ };
+}
+
+static inline unsigned bch2_compression_encode(struct bch_compression_opt opt)
+{
+ return opt.type|(opt.level << 4);
+}
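+
+/*
+ * Example (illustrative): zstd at level 7 encodes as
+ * BCH_COMPRESSION_OPT_zstd | (7 << 4) - type in the low nibble, level in the
+ * high nibble, so levels are capped at 15.
+ */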
+
+static const unsigned __bch2_compression_opt_to_type[] = {
+#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_COMPRESSION_TYPE_##t,
+ BCH_COMPRESSION_OPTS()
+#undef x
+};
+
+static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
+{
+ return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
+}
+
+int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
+ struct bch_extent_crc_unpacked *);
+int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
+ struct bvec_iter, struct bch_extent_crc_unpacked);
+unsigned bch2_bio_compress(struct bch_fs *, struct bio *, size_t *,
+ struct bio *, size_t *, unsigned);
+
+int bch2_check_set_has_compressed_data(struct bch_fs *, unsigned);
+void bch2_fs_compress_exit(struct bch_fs *);
+int bch2_fs_compress_init(struct bch_fs *);
+
+int bch2_opt_compression_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
+void bch2_opt_compression_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+
+#define bch2_opt_compression (struct bch_opt_fn) { \
+ .parse = bch2_opt_compression_parse, \
+ .to_text = bch2_opt_compression_to_text, \
+}
+
+#endif /* _BCACHEFS_COMPRESS_H */
diff --git a/fs/bcachefs/counters.c b/fs/bcachefs/counters.c
new file mode 100644
index 000000000000..02a996e06a64
--- /dev/null
+++ b/fs/bcachefs/counters.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "super-io.h"
+#include "counters.h"
+
+/* BCH_SB_FIELD_counters */
+
+static const char * const bch2_counter_names[] = {
+#define x(t, n, ...) (#t),
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+ NULL
+};
+
+static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
+{
+ if (!ctrs)
+ return 0;
+
+ return (__le64 *) vstruct_end(&ctrs->field) - &ctrs->d[0];
+}
+
+static int bch2_sb_counters_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ return 0;
+}
+
+static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
+ unsigned int i;
+ unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
+
+ for (i = 0; i < nr; i++) {
+ if (i < BCH_COUNTER_NR)
+ prt_printf(out, "%s ", bch2_counter_names[i]);
+ else
+ prt_printf(out, "(unknown)");
+
+ prt_tab(out);
+ prt_printf(out, "%llu", le64_to_cpu(ctrs->d[i]));
+ prt_newline(out);
+ }
+}
+
+int bch2_sb_counters_to_cpu(struct bch_fs *c)
+{
+ struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
+ unsigned int i;
+ unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
+ u64 val = 0;
+
+ for (i = 0; i < BCH_COUNTER_NR; i++)
+ c->counters_on_mount[i] = 0;
+
+ for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++) {
+ val = le64_to_cpu(ctrs->d[i]);
+ percpu_u64_set(&c->counters[i], val);
+ c->counters_on_mount[i] = val;
+ }
+ return 0;
+}
+
+int bch2_sb_counters_from_cpu(struct bch_fs *c)
+{
+ struct bch_sb_field_counters *ctrs = bch2_sb_field_get(c->disk_sb.sb, counters);
+ struct bch_sb_field_counters *ret;
+ unsigned int i;
+ unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
+
+ if (nr < BCH_COUNTER_NR) {
+ ret = bch2_sb_field_resize(&c->disk_sb, counters,
+ sizeof(*ctrs) / sizeof(u64) + BCH_COUNTER_NR);
+
+ if (ret) {
+ ctrs = ret;
+ nr = bch2_sb_counter_nr_entries(ctrs);
+ }
+ }
+
+ for (i = 0; i < min_t(unsigned int, nr, BCH_COUNTER_NR); i++)
+ ctrs->d[i] = cpu_to_le64(percpu_u64_get(&c->counters[i]));
+ return 0;
+}
+
+void bch2_fs_counters_exit(struct bch_fs *c)
+{
+ free_percpu(c->counters);
+}
+
+int bch2_fs_counters_init(struct bch_fs *c)
+{
+ c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
+ if (!c->counters)
+ return -BCH_ERR_ENOMEM_fs_counters_init;
+
+ return bch2_sb_counters_to_cpu(c);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_counters = {
+ .validate = bch2_sb_counters_validate,
+ .to_text = bch2_sb_counters_to_text,
+};
diff --git a/fs/bcachefs/counters.h b/fs/bcachefs/counters.h
new file mode 100644
index 000000000000..4778aa19bf34
--- /dev/null
+++ b/fs/bcachefs/counters.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_COUNTERS_H
+#define _BCACHEFS_COUNTERS_H
+
+#include "bcachefs.h"
+#include "super-io.h"
+
+int bch2_sb_counters_to_cpu(struct bch_fs *);
+int bch2_sb_counters_from_cpu(struct bch_fs *);
+
+void bch2_fs_counters_exit(struct bch_fs *);
+int bch2_fs_counters_init(struct bch_fs *);
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
+
+#endif /* _BCACHEFS_COUNTERS_H */
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
new file mode 100644
index 000000000000..114f86b45fd5
--- /dev/null
+++ b/fs/bcachefs/darray.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DARRAY_H
+#define _BCACHEFS_DARRAY_H
+
+/*
+ * Dynamic arrays:
+ *
+ * Inspired by CCAN's darray
+ */
+
+#include "util.h"
+#include <linux/slab.h>
+
+#define DARRAY(type) \
+struct { \
+ size_t nr, size; \
+ type *data; \
+}
+
+typedef DARRAY(void) darray_void;
+
+static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more, gfp_t gfp)
+{
+ if (d->nr + more > d->size) {
+ size_t new_size = roundup_pow_of_two(d->nr + more);
+ void *data = krealloc_array(d->data, new_size, t_size, gfp);
+
+ if (!data)
+ return -ENOMEM;
+
+ d->data = data;
+ d->size = new_size;
+ }
+
+ return 0;
+}
+
+#define darray_make_room_gfp(_d, _more, _gfp) \
+ __darray_make_room((darray_void *) (_d), sizeof((_d)->data[0]), (_more), _gfp)
+
+#define darray_make_room(_d, _more) \
+ darray_make_room_gfp(_d, _more, GFP_KERNEL)
+
+#define darray_top(_d) ((_d).data[(_d).nr])
+
+#define darray_push_gfp(_d, _item, _gfp) \
+({ \
+ int _ret = darray_make_room_gfp((_d), 1, _gfp); \
+ \
+ if (!_ret) \
+ (_d)->data[(_d)->nr++] = (_item); \
+ _ret; \
+})
+
+#define darray_push(_d, _item) darray_push_gfp(_d, _item, GFP_KERNEL)
+
+#define darray_pop(_d) ((_d)->data[--(_d)->nr])
+
+#define darray_first(_d) ((_d).data[0])
+#define darray_last(_d) ((_d).data[(_d).nr - 1])
+
+#define darray_insert_item(_d, pos, _item) \
+({ \
+ size_t _pos = (pos); \
+ int _ret = darray_make_room((_d), 1); \
+ \
+ if (!_ret) \
+ array_insert_item((_d)->data, (_d)->nr, _pos, (_item)); \
+ _ret; \
+})
+
+#define darray_for_each(_d, _i) \
+ for (_i = (_d).data; _i < (_d).data + (_d).nr; _i++)
+
+#define darray_init(_d) \
+do { \
+ (_d)->data = NULL; \
+ (_d)->nr = (_d)->size = 0; \
+} while (0)
+
+#define darray_exit(_d) \
+do { \
+ kfree((_d)->data); \
+ darray_init(_d); \
+} while (0)
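+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *	DARRAY(int) nums;
+ *	int *i, ret;
+ *
+ *	darray_init(&nums);
+ *	ret = darray_push(&nums, 42);	(may fail with -ENOMEM)
+ *	darray_for_each(nums, i)
+ *		pr_info("%i\n", *i);
+ *	darray_exit(&nums);
+ */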
+
+#endif /* _BCACHEFS_DARRAY_H */
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
new file mode 100644
index 000000000000..899ff46de8e0
--- /dev/null
+++ b/fs/bcachefs/data_update.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "data_update.h"
+#include "ec.h"
+#include "error.h"
+#include "extents.h"
+#include "io_write.h"
+#include "keylist.h"
+#include "move.h"
+#include "nocow_locking.h"
+#include "subvolume.h"
+#include "trace.h"
+
+static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_finish_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_finish(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
+static void trace_move_extent_fail2(struct data_update *m,
+ struct bkey_s_c new,
+ struct bkey_s_c wrote,
+ struct bkey_i *insert,
+ const char *msg)
+{
+ struct bch_fs *c = m->op.c;
+ struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_ptr *ptr;
+ struct extent_ptr_decoded p;
+ struct printbuf buf = PRINTBUF;
+ unsigned i, rewrites_found = 0;
+
+ if (!trace_move_extent_fail_enabled())
+ return;
+
+ prt_str(&buf, msg);
+
+ if (insert) {
+ i = 0;
+ bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
+ if (((1U << i) & m->data_opts.rewrite_ptrs) &&
+ (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+ !ptr->cached)
+ rewrites_found |= 1U << i;
+ i++;
+ }
+ }
+
+ prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
+ (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
+ (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
+ (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
+ (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);
+
+ prt_printf(&buf, "\nrewrites found: %u%u%u%u",
+ (rewrites_found & (1 << 0)) != 0,
+ (rewrites_found & (1 << 1)) != 0,
+ (rewrites_found & (1 << 2)) != 0,
+ (rewrites_found & (1 << 3)) != 0);
+
+ prt_str(&buf, "\nold: ");
+ bch2_bkey_val_to_text(&buf, c, old);
+
+ prt_str(&buf, "\nnew: ");
+ bch2_bkey_val_to_text(&buf, c, new);
+
+ prt_str(&buf, "\nwrote: ");
+ bch2_bkey_val_to_text(&buf, c, wrote);
+
+ if (insert) {
+ prt_str(&buf, "\ninsert: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
+ }
+
+ trace_move_extent_fail(c, buf.buf);
+ printbuf_exit(&buf);
+}
+
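+/*
+ * Outline of the index update below: re-read the extent, drop the pointers
+ * we were asked to rewrite, drop newly written replicas that conflict with
+ * ones we want to keep, trim excess durability, then splice in the new
+ * pointers and commit - retrying from the top on transaction restart.
+ */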
+static int __bch2_data_update_index_update(struct btree_trans *trans,
+ struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct btree_iter iter;
+ struct data_update *m =
+ container_of(op, struct data_update, op);
+ struct keylist *keys = &op->insert_keys;
+ struct bkey_buf _new, _insert;
+ int ret = 0;
+
+ bch2_bkey_buf_init(&_new);
+ bch2_bkey_buf_init(&_insert);
+ bch2_bkey_buf_realloc(&_insert, c, U8_MAX);
+
+ bch2_trans_iter_init(trans, &iter, m->btree_id,
+ bkey_start_pos(&bch2_keylist_front(keys)->k),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+ while (1) {
+ struct bkey_s_c k;
+ struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
+ struct bkey_i *insert = NULL;
+ struct bkey_i_extent *new;
+ const union bch_extent_entry *entry_c;
+ union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bch_extent_ptr *ptr;
+ const struct bch_extent_ptr *ptr_c;
+ struct bpos next_pos;
+ bool should_check_enospc;
+ s64 i_sectors_delta = 0, disk_sectors_delta = 0;
+ unsigned rewrites_found = 0, durability, i;
+
+ bch2_trans_begin(trans);
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ new = bkey_i_to_extent(bch2_keylist_front(keys));
+
+ if (!bch2_extents_match(k, old)) {
+ trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
+ NULL, "no match:");
+ goto nowork;
+ }
+
+ bkey_reassemble(_insert.k, k);
+ insert = _insert.k;
+
+ bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
+ new = bkey_i_to_extent(_new.k);
+ bch2_cut_front(iter.pos, &new->k_i);
+
+ bch2_cut_front(iter.pos, insert);
+ bch2_cut_back(new->k.p, insert);
+ bch2_cut_back(insert->k.p, &new->k_i);
+
+ /*
+ * @old: extent that we read from
+ * @insert: key that we're going to update, initialized from
+ * extent currently in btree - same as @old unless we raced with
+ * other updates
+ * @new: extent with new pointers that we'll be adding to @insert
+ *
+ * First, drop rewrite_ptrs from @new:
+ */
+ i = 0;
+ bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
+ if (((1U << i) & m->data_opts.rewrite_ptrs) &&
+ (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+ !ptr->cached) {
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
+ /*
+ * See comment below:
+ bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
+ */
+ rewrites_found |= 1U << i;
+ }
+ i++;
+ }
+
+ if (m->data_opts.rewrite_ptrs &&
+ !rewrites_found &&
+ bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
+ trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
+ goto nowork;
+ }
+
+ /*
+ * A replica that we just wrote might conflict with a replica
+ * that we want to keep, due to racing with another move:
+ */
+restart_drop_conflicting_replicas:
+ extent_for_each_ptr(extent_i_to_s(new), ptr)
+ if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
+ !ptr_c->cached) {
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
+ goto restart_drop_conflicting_replicas;
+ }
+
+ if (!bkey_val_u64s(&new->k)) {
+ trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
+ goto nowork;
+ }
+
+ /* Now, drop pointers that conflict with what we just wrote: */
+ extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
+ if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
+
+ durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
+ bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
+
+ /* Now, drop excess replicas: */
+restart_drop_extra_replicas:
+ bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
+ unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
+
+ if (!p.ptr.cached &&
+ durability - ptr_durability >= m->op.opts.data_replicas) {
+ durability -= ptr_durability;
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
+ /*
+ * Currently, we're dropping unneeded replicas
+ * instead of marking them as cached, since
+ * cached data in stripe buckets prevents them
+ * from being reused:
+ bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
+ */
+ goto restart_drop_extra_replicas;
+ }
+ }
+
+ /* Finally, add the pointers we just wrote: */
+ extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
+ bch2_extent_ptr_decoded_append(insert, &p);
+
+ bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
+ bch2_extent_normalize(c, bkey_i_to_s(insert));
+
+ ret = bch2_sum_sector_overwrites(trans, &iter, insert,
+ &should_check_enospc,
+ &i_sectors_delta,
+ &disk_sectors_delta);
+ if (ret)
+ goto err;
+
+ if (disk_sectors_delta > (s64) op->res.sectors) {
+ ret = bch2_disk_reservation_add(c, &op->res,
+ disk_sectors_delta - op->res.sectors,
+ !should_check_enospc
+ ? BCH_DISK_RESERVATION_NOFAIL : 0);
+ if (ret)
+ goto out;
+ }
+
+ next_pos = insert->k.p;
+
+ ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+ k.k->p, bkey_start_pos(&insert->k)) ?:
+ bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+ k.k->p, insert->k.p);
+ if (ret)
+ goto err;
+
+ ret = bch2_trans_update(trans, &iter, insert,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_commit(trans, &op->res,
+ NULL,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL|
+ m->data_opts.btree_insert_flags);
+ if (!ret) {
+ bch2_btree_iter_set_pos(&iter, next_pos);
+
+ this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
+ trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
+ }
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+ if (ret)
+ break;
+next:
+ while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
+ bch2_keylist_pop_front(keys);
+ if (bch2_keylist_empty(keys))
+ goto out;
+ }
+ continue;
+nowork:
+ if (m->ctxt && m->ctxt->stats) {
+ BUG_ON(k.k->p.offset <= iter.pos.offset);
+ atomic64_inc(&m->ctxt->stats->keys_raced);
+ atomic64_add(k.k->p.offset - iter.pos.offset,
+ &m->ctxt->stats->sectors_raced);
+ }
+
+ this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);
+
+ bch2_btree_iter_advance(&iter);
+ goto next;
+ }
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_bkey_buf_exit(&_insert, c);
+ bch2_bkey_buf_exit(&_new, c);
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
+ return ret;
+}
+
+int bch2_data_update_index_update(struct bch_write_op *op)
+{
+ return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
+}
+
+void bch2_data_update_read_done(struct data_update *m,
+ struct bch_extent_crc_unpacked crc)
+{
+ /* write bio must own pages: */
+ BUG_ON(!m->op.wbio.bio.bi_vcnt);
+
+ m->op.crc = crc;
+ m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;
+
+ closure_call(&m->op.cl, bch2_write, NULL, NULL);
+}
+
+void bch2_data_update_exit(struct data_update *update)
+{
+ struct bch_fs *c = update->op.c;
+ struct bkey_ptrs_c ptrs =
+ bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (c->opts.nocow_enabled)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr), 0);
+ percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+ }
+
+ bch2_bkey_buf_exit(&update->k, c);
+ bch2_disk_reservation_put(c, &update->op.res);
+ bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
+}
+
+void bch2_update_unwritten_extent(struct btree_trans *trans,
+ struct data_update *update)
+{
+ struct bch_fs *c = update->op.c;
+ struct bio *bio = &update->op.wbio.bio;
+ struct bkey_i_extent *e;
+ struct write_point *wp;
+ struct bch_extent_ptr *ptr;
+ struct closure cl;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ closure_init_stack(&cl);
+ bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
+
+ while (bio_sectors(bio)) {
+ unsigned sectors = bio_sectors(bio);
+
+ bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
+ BTREE_ITER_SLOTS);
+ ret = lockrestart_do(trans, ({
+ k = bch2_btree_iter_peek_slot(&iter);
+ bkey_err(k);
+ }));
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
+ break;
+
+ e = bkey_extent_init(update->op.insert_keys.top);
+ e->k.p = update->op.pos;
+
+ ret = bch2_alloc_sectors_start_trans(trans,
+ update->op.target,
+ false,
+ update->op.write_point,
+ &update->op.devs_have,
+ update->op.nr_replicas,
+ update->op.nr_replicas,
+ update->op.watermark,
+ 0, &cl, &wp);
+ if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ continue;
+ }
+
+ if (ret)
+ return;
+
+ sectors = min(sectors, wp->sectors_free);
+
+ bch2_key_resize(&e->k, sectors);
+
+ bch2_open_bucket_get(c, wp, &update->op.open_buckets);
+ bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+ bch2_alloc_sectors_done(c, wp);
+
+ bio_advance(bio, sectors << 9);
+ update->op.pos.offset += sectors;
+
+ extent_for_each_ptr(extent_i_to_s(e), ptr)
+ ptr->unwritten = true;
+ bch2_keylist_push(&update->op.insert_keys);
+
+ ret = __bch2_data_update_index_update(trans, &update->op);
+
+ bch2_open_buckets_put(c, &update->op.open_buckets);
+
+ if (ret)
+ break;
+ }
+
+ if (closure_nr_remaining(&cl) != 1) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ }
+}
+
+int bch2_data_update_init(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct data_update *m,
+ struct write_point_specifier wp,
+ struct bch_io_opts io_opts,
+ struct data_update_opts data_opts,
+ enum btree_id btree_id,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ const struct bch_extent_ptr *ptr;
+ unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
+ unsigned ptrs_locked = 0;
+ int ret;
+
+ bch2_bkey_buf_init(&m->k);
+ bch2_bkey_buf_reassemble(&m->k, c, k);
+ m->btree_id = btree_id;
+ m->data_opts = data_opts;
+
+ bch2_write_op_init(&m->op, c, io_opts);
+ m->op.pos = bkey_start_pos(k.k);
+ m->op.version = k.k->version;
+ m->op.target = data_opts.target;
+ m->op.write_point = wp;
+ m->op.nr_replicas = 0;
+ m->op.flags |= BCH_WRITE_PAGES_STABLE|
+ BCH_WRITE_PAGES_OWNED|
+ BCH_WRITE_DATA_ENCODED|
+ BCH_WRITE_MOVE|
+ m->data_opts.write_flags;
+ m->op.compression_opt = io_opts.background_compression ?: io_opts.compression;
+ m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ bool locked;
+
+ if (((1U << i) & m->data_opts.rewrite_ptrs)) {
+ BUG_ON(p.ptr.cached);
+
+ if (crc_is_compressed(p.crc))
+ reserve_sectors += k.k->size;
+
+ m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
+ } else if (!p.ptr.cached) {
+ bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
+ }
+
+ /*
+ * op->csum_type is normally initialized from the fs/file's
+ * current options - but if an extent is encrypted, we require
+ * that it stays encrypted:
+ */
+ if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
+ m->op.nonce = p.crc.nonce + p.crc.offset;
+ m->op.csum_type = p.crc.csum_type;
+ }
+
+ if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
+ m->op.incompressible = true;
+
+ if (c->opts.nocow_enabled) {
+ if (ctxt) {
+ move_ctxt_wait_event(ctxt, trans,
+ (locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0)) ||
+ !atomic_read(&ctxt->read_sectors));
+
+ if (!locked)
+ bch2_bucket_nocow_lock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0);
+ } else {
+ if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0)) {
+ ret = -BCH_ERR_nocow_lock_blocked;
+ goto err;
+ }
+ }
+ ptrs_locked |= (1U << i);
+ }
+
+ i++;
+ }
+
+ if (reserve_sectors) {
+ ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
+ m->data_opts.extra_replicas
+ ? 0
+ : BCH_DISK_RESERVATION_NOFAIL);
+ if (ret)
+ goto err;
+ }
+
+ m->op.nr_replicas += m->data_opts.extra_replicas;
+ m->op.nr_replicas_required = m->op.nr_replicas;
+
+ BUG_ON(!m->op.nr_replicas);
+
+ /* Special handling required: */
+ if (bkey_extent_is_unwritten(k))
+ return -BCH_ERR_unwritten_extent_update;
+ return 0;
+err:
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if ((1U << i) & ptrs_locked)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, &p.ptr), 0);
+ percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
+ i++;
+ }
+
+ bch2_bkey_buf_exit(&m->k, c);
+ bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
+ return ret;
+}
+
+void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+ unsigned i = 0;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
+ opts->kill_ptrs |= 1U << i;
+ opts->rewrite_ptrs ^= 1U << i;
+ }
+
+ i++;
+ }
+}
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
new file mode 100644
index 000000000000..7ca1f98d7e94
--- /dev/null
+++ b/fs/bcachefs/data_update.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BCACHEFS_DATA_UPDATE_H
+#define _BCACHEFS_DATA_UPDATE_H
+
+#include "bkey_buf.h"
+#include "io_write_types.h"
+
+struct moving_context;
+
+struct data_update_opts {
+ unsigned rewrite_ptrs;
+ unsigned kill_ptrs;
+ u16 target;
+ u8 extra_replicas;
+ unsigned btree_insert_flags;
+ unsigned write_flags;
+};
+
+struct data_update {
+ /* extent being updated: */
+ enum btree_id btree_id;
+ struct bkey_buf k;
+ struct data_update_opts data_opts;
+ struct moving_context *ctxt;
+ struct bch_write_op op;
+};
+
+int bch2_data_update_index_update(struct bch_write_op *);
+
+void bch2_data_update_read_done(struct data_update *,
+ struct bch_extent_crc_unpacked);
+
+void bch2_data_update_exit(struct data_update *);
+void bch2_update_unwritten_extent(struct btree_trans *, struct data_update *);
+int bch2_data_update_init(struct btree_trans *, struct moving_context *,
+ struct data_update *,
+ struct write_point_specifier,
+ struct bch_io_opts, struct data_update_opts,
+ enum btree_id, struct bkey_s_c);
+void bch2_data_update_opts_normalize(struct bkey_s_c, struct data_update_opts *);
+
+#endif /* _BCACHEFS_DATA_UPDATE_H */
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
new file mode 100644
index 000000000000..75a3dc7cbd47
--- /dev/null
+++ b/fs/bcachefs/debug.c
@@ -0,0 +1,954 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Assorted bcachefs debug code
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "debug.h"
+#include "error.h"
+#include "extents.h"
+#include "fsck.h"
+#include "inode.h"
+#include "super.h"
+
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+static struct dentry *bch_debug;
+
+static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
+ struct extent_ptr_decoded pick)
+{
+ struct btree *v = c->verify_data;
+ struct btree_node *n_ondisk = c->verify_ondisk;
+ struct btree_node *n_sorted = c->verify_data->data;
+ struct bset *sorted, *inmemory = &b->data->keys;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ struct bio *bio;
+ bool failed = false, saw_error = false;
+
+ if (!bch2_dev_get_ioref(ca, READ))
+ return false;
+
+ bio = bio_alloc_bioset(ca->disk_sb.bdev,
+ buf_pages(n_sorted, btree_bytes(c)),
+ REQ_OP_READ|REQ_META,
+ GFP_NOFS,
+ &c->btree_bio);
+ bio->bi_iter.bi_sector = pick.ptr.offset;
+ bch2_bio_map(bio, n_sorted, btree_bytes(c));
+
+ submit_bio_wait(bio);
+
+ bio_put(bio);
+ percpu_ref_put(&ca->io_ref);
+
+ memcpy(n_ondisk, n_sorted, btree_bytes(c));
+
+ v->written = 0;
+ if (bch2_btree_node_read_done(c, ca, v, false, &saw_error) || saw_error)
+ return false;
+
+ n_sorted = c->verify_data->data;
+ sorted = &n_sorted->keys;
+
+ if (inmemory->u64s != sorted->u64s ||
+ memcmp(inmemory->start,
+ sorted->start,
+ vstruct_end(inmemory) - (void *) inmemory->start)) {
+ unsigned offset = 0, sectors;
+ struct bset *i;
+ unsigned j;
+
+ console_lock();
+
+ printk(KERN_ERR "*** in memory:\n");
+ bch2_dump_bset(c, b, inmemory, 0);
+
+ printk(KERN_ERR "*** read back in:\n");
+ bch2_dump_bset(c, v, sorted, 0);
+
+ while (offset < v->written) {
+ if (!offset) {
+ i = &n_ondisk->keys;
+ sectors = vstruct_blocks(n_ondisk, c->block_bits) <<
+ c->block_bits;
+ } else {
+ struct btree_node_entry *bne =
+ (void *) n_ondisk + (offset << 9);
+ i = &bne->keys;
+
+ sectors = vstruct_blocks(bne, c->block_bits) <<
+ c->block_bits;
+ }
+
+ printk(KERN_ERR "*** on disk block %u:\n", offset);
+ bch2_dump_bset(c, b, i, offset);
+
+ offset += sectors;
+ }
+
+ for (j = 0; j < le16_to_cpu(inmemory->u64s); j++)
+ if (inmemory->_data[j] != sorted->_data[j])
+ break;
+
+ console_unlock();
+ bch_err(c, "verify failed at key %u", j);
+
+ failed = true;
+ }
+
+ if (v->written != b->written) {
+ bch_err(c, "written wrong: expected %u, got %u",
+ b->written, v->written);
+ failed = true;
+ }
+
+ return failed;
+}
+
+void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
+{
+ struct bkey_ptrs_c ptrs;
+ struct extent_ptr_decoded p;
+ const union bch_extent_entry *entry;
+ struct btree *v;
+ struct bset *inmemory = &b->data->keys;
+ struct bkey_packed *k;
+ bool failed = false;
+
+ if (c->opts.nochanges)
+ return;
+
+ bch2_btree_node_io_lock(b);
+ mutex_lock(&c->verify_lock);
+
+ if (!c->verify_ondisk) {
+ c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+ if (!c->verify_ondisk)
+ goto out;
+ }
+
+ if (!c->verify_data) {
+ c->verify_data = __bch2_btree_node_mem_alloc(c);
+ if (!c->verify_data)
+ goto out;
+
+ list_del_init(&c->verify_data->list);
+ }
+
+ BUG_ON(b->nsets != 1);
+
+ for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
+ if (k->type == KEY_TYPE_btree_ptr_v2)
+ ((struct bch_btree_ptr_v2 *) bkeyp_val(&b->format, k))->mem_ptr = 0;
+
+ v = c->verify_data;
+ bkey_copy(&v->key, &b->key);
+ v->c.level = b->c.level;
+ v->c.btree_id = b->c.btree_id;
+ bch2_btree_keys_init(v);
+
+ ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&b->key));
+ bkey_for_each_ptr_decode(&b->key.k, ptrs, p, entry)
+ failed |= bch2_btree_verify_replica(c, b, p);
+
+ if (failed) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ bch2_fs_fatal_error(c, "btree node verify failed for: %s\n", buf.buf);
+ printbuf_exit(&buf);
+ }
+out:
+ mutex_unlock(&c->verify_lock);
+ bch2_btree_node_io_unlock(b);
+}
+
+void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
+ const struct btree *b)
+{
+ struct btree_node *n_ondisk = NULL;
+ struct extent_ptr_decoded pick;
+ struct bch_dev *ca;
+ struct bio *bio = NULL;
+ unsigned offset = 0;
+ int ret;
+
+ if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key), NULL, &pick) <= 0) {
+ prt_printf(out, "error getting device to read from: invalid device\n");
+ return;
+ }
+
+ ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ if (!bch2_dev_get_ioref(ca, READ)) {
+ prt_printf(out, "error getting device to read from: not online\n");
+ return;
+ }
+
+ n_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
+ if (!n_ondisk) {
+ prt_printf(out, "memory allocation failure\n");
+ goto out;
+ }
+
+ bio = bio_alloc_bioset(ca->disk_sb.bdev,
+ buf_pages(n_ondisk, btree_bytes(c)),
+ REQ_OP_READ|REQ_META,
+ GFP_NOFS,
+ &c->btree_bio);
+ bio->bi_iter.bi_sector = pick.ptr.offset;
+ bch2_bio_map(bio, n_ondisk, btree_bytes(c));
+
+ ret = submit_bio_wait(bio);
+ if (ret) {
+ prt_printf(out, "IO error reading btree node: %s\n", bch2_err_str(ret));
+ goto out;
+ }
+
+ while (offset < btree_sectors(c)) {
+ struct bset *i;
+ struct nonce nonce;
+ struct bch_csum csum;
+ struct bkey_packed *k;
+ unsigned sectors;
+
+ if (!offset) {
+ i = &n_ondisk->keys;
+
+ if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
+ prt_printf(out, "unknown checksum type at offset %u: %llu\n",
+ offset, BSET_CSUM_TYPE(i));
+ goto out;
+ }
+
+ nonce = btree_nonce(i, offset << 9);
+ csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, n_ondisk);
+
+ if (bch2_crc_cmp(csum, n_ondisk->csum)) {
+ prt_printf(out, "invalid checksum\n");
+ goto out;
+ }
+
+ bset_encrypt(c, i, offset << 9);
+
+ sectors = vstruct_sectors(n_ondisk, c->block_bits);
+ } else {
+ struct btree_node_entry *bne = (void *) n_ondisk + (offset << 9);
+
+ i = &bne->keys;
+
+ if (i->seq != n_ondisk->keys.seq)
+ break;
+
+ if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i))) {
+ prt_printf(out, "unknown checksum type at offset %u: %llu\n",
+ offset, BSET_CSUM_TYPE(i));
+ goto out;
+ }
+
+ nonce = btree_nonce(i, offset << 9);
+ csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
+
+ if (bch2_crc_cmp(csum, bne->csum)) {
+ prt_printf(out, "invalid checksum");
+ goto out;
+ }
+
+ bset_encrypt(c, i, offset << 9);
+
+ sectors = vstruct_sectors(bne, c->block_bits);
+ }
+
+ prt_printf(out, " offset %u version %u, journal seq %llu\n",
+ offset,
+ le16_to_cpu(i->version),
+ le64_to_cpu(i->journal_seq));
+ offset += sectors;
+
+ printbuf_indent_add(out, 4);
+
+ for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
+ struct bkey u;
+
+ bch2_bkey_val_to_text(out, c, bkey_disassemble(b, k, &u));
+ prt_newline(out);
+ }
+
+ printbuf_indent_sub(out, 4);
+ }
+out:
+ if (bio)
+ bio_put(bio);
+ kvpfree(n_ondisk, btree_bytes(c));
+ percpu_ref_put(&ca->io_ref);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* XXX: bch_fs refcounting */
+
+struct dump_iter {
+ struct bch_fs *c;
+ enum btree_id id;
+ struct bpos from;
+ struct bpos prev_node;
+ u64 iter;
+
+ struct printbuf buf;
+
+ char __user *ubuf; /* destination user buffer */
+ size_t size; /* size of requested read */
+ ssize_t ret; /* bytes read so far */
+};
+
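+/*
+ * Copy as much buffered output to userspace as fits: returns 0 while the
+ * user buffer still has room, -EFAULT if copy_to_user() failed, and the
+ * total byte count once the user buffer is full (ending this read() call).
+ * Output that didn't fit is compacted to the front of the printbuf for the
+ * next call.
+ */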
+static ssize_t flush_buf(struct dump_iter *i)
+{
+ if (i->buf.pos) {
+ size_t bytes = min_t(size_t, i->buf.pos, i->size);
+ int copied = bytes - copy_to_user(i->ubuf, i->buf.buf, bytes);
+
+ i->ret += copied;
+ i->ubuf += copied;
+ i->size -= copied;
+ i->buf.pos -= copied;
+ memmove(i->buf.buf, i->buf.buf + copied, i->buf.pos);
+
+ if (copied != bytes)
+ return -EFAULT;
+ }
+
+ return i->size ? 0 : i->ret;
+}
+
+static int bch2_dump_open(struct inode *inode, struct file *file)
+{
+ struct btree_debug *bd = inode->i_private;
+ struct dump_iter *i;
+
+ i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
+ if (!i)
+ return -ENOMEM;
+
+ file->private_data = i;
+ i->from = POS_MIN;
+ i->iter = 0;
+ i->c = container_of(bd, struct bch_fs, btree_debug[bd->id]);
+ i->id = bd->id;
+ i->buf = PRINTBUF;
+
+ return 0;
+}
+
+static int bch2_dump_release(struct inode *inode, struct file *file)
+{
+ struct dump_iter *i = file->private_data;
+
+ printbuf_exit(&i->buf);
+ kfree(i);
+ return 0;
+}
+
+static ssize_t bch2_read_btree(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ ssize_t ret;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ ret = flush_buf(i);
+ if (ret)
+ return ret;
+
+ trans = bch2_trans_get(i->c);
+ ret = for_each_btree_key2(trans, iter, i->id, i->from,
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ bch2_bkey_val_to_text(&i->buf, i->c, k);
+ prt_newline(&i->buf);
+ drop_locks_do(trans, flush_buf(i));
+ }));
+ i->from = iter.pos;
+
+ bch2_trans_put(trans);
+
+ if (!ret)
+ ret = flush_buf(i);
+
+ return ret ?: i->ret;
+}
+
+static const struct file_operations btree_debug_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_read_btree,
+};
+
+static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct btree *b;
+ ssize_t ret;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ ret = flush_buf(i);
+ if (ret)
+ return ret;
+
+ if (bpos_eq(SPOS_MAX, i->from))
+ return i->ret;
+
+ trans = bch2_trans_get(i->c);
+retry:
+ bch2_trans_begin(trans);
+
+ for_each_btree_node(trans, iter, i->id, i->from, 0, b, ret) {
+ bch2_btree_node_to_text(&i->buf, i->c, b);
+ i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
+ ? bpos_successor(b->key.k.p)
+ : b->key.k.p;
+
+ ret = drop_locks_do(trans, flush_buf(i));
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+
+ if (!ret)
+ ret = flush_buf(i);
+
+ return ret ?: i->ret;
+}
+
+static const struct file_operations btree_format_debug_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_read_btree_formats,
+};
+
+static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ ssize_t ret;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ ret = flush_buf(i);
+ if (ret)
+ return ret;
+
+ trans = bch2_trans_get(i->c);
+
+ ret = for_each_btree_key2(trans, iter, i->id, i->from,
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ struct btree_path_level *l = &iter.path->l[0];
+ struct bkey_packed *_k =
+ bch2_btree_node_iter_peek(&l->iter, l->b);
+
+ if (bpos_gt(l->b->key.k.p, i->prev_node)) {
+ bch2_btree_node_to_text(&i->buf, i->c, l->b);
+ i->prev_node = l->b->key.k.p;
+ }
+
+ bch2_bfloat_to_text(&i->buf, l->b, _k);
+ drop_locks_do(trans, flush_buf(i));
+ }));
+ i->from = iter.pos;
+
+ bch2_trans_put(trans);
+
+ if (!ret)
+ ret = flush_buf(i);
+
+ return ret ?: i->ret;
+}
+
+static const struct file_operations bfloat_failed_debug_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_read_bfloat_failed,
+};
+
+static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
+ struct btree *b)
+{
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 32);
+
+ prt_printf(out, "%px btree=%s l=%u ",
+ b,
+ bch2_btree_ids[b->c.btree_id],
+ b->c.level);
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+ prt_newline(out);
+
+ prt_printf(out, "flags: ");
+ prt_tab(out);
+ prt_bitflags(out, bch2_btree_node_flags, b->flags);
+ prt_newline(out);
+
+ prt_printf(out, "pcpu read locks: ");
+ prt_tab(out);
+ prt_printf(out, "%u", b->c.lock.readers != NULL);
+ prt_newline(out);
+
+ prt_printf(out, "written:");
+ prt_tab(out);
+ prt_printf(out, "%u", b->written);
+ prt_newline(out);
+
+ prt_printf(out, "writes blocked:");
+ prt_tab(out);
+ prt_printf(out, "%u", !list_empty_careful(&b->write_blocked));
+ prt_newline(out);
+
+ prt_printf(out, "will make reachable:");
+ prt_tab(out);
+ prt_printf(out, "%lx", b->will_make_reachable);
+ prt_newline(out);
+
+ prt_printf(out, "journal pin %px:", &b->writes[0].journal);
+ prt_tab(out);
+ prt_printf(out, "%llu", b->writes[0].journal.seq);
+ prt_newline(out);
+
+ prt_printf(out, "journal pin %px:", &b->writes[1].journal);
+ prt_tab(out);
+ prt_printf(out, "%llu", b->writes[1].journal.seq);
+ prt_newline(out);
+
+ printbuf_indent_sub(out, 2);
+}
+
+static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ bool done = false;
+ ssize_t ret = 0;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ do {
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct btree *b;
+
+ ret = flush_buf(i);
+ if (ret)
+ return ret;
+
+ rcu_read_lock();
+ i->buf.atomic++;
+ tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
+ &c->btree_cache.table);
+ if (i->iter < tbl->size) {
+ rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
+ bch2_cached_btree_node_to_text(&i->buf, c, b);
+ i->iter++;
+ } else {
+ done = true;
+ }
+ --i->buf.atomic;
+ rcu_read_unlock();
+ } while (!done);
+
+ if (i->buf.allocation_failure)
+ ret = -ENOMEM;
+
+ if (!ret)
+ ret = flush_buf(i);
+
+ return ret ?: i->ret;
+}
+
+static const struct file_operations cached_btree_nodes_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_cached_btree_nodes_read,
+};
+
+#ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
+static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ struct btree_trans *trans;
+ ssize_t ret = 0;
+ u32 seq;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
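+	/*
+	 * To walk the transaction list while flushing output (which can
+	 * fault): take a ref on each transaction, note the seqmutex sequence
+	 * number, and drop the lock around flush_buf().  If the sequence
+	 * changed by the time we relock, restart the walk; transactions
+	 * already emitted are skipped by comparing pids against i->iter.
+	 */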
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ if (trans->locking_wait.task->pid <= i->iter)
+ continue;
+
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
+ ret = flush_buf(i);
+ if (ret) {
+ closure_put(&trans->ref);
+ goto unlocked;
+ }
+
+ bch2_btree_trans_to_text(&i->buf, trans);
+
+ prt_printf(&i->buf, "backtrace:");
+ prt_newline(&i->buf);
+ printbuf_indent_add(&i->buf, 2);
+ bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task);
+ printbuf_indent_sub(&i->buf, 2);
+ prt_newline(&i->buf);
+
+ i->iter = trans->locking_wait.task->pid;
+
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
+ }
+ seqmutex_unlock(&c->btree_trans_lock);
+unlocked:
+ if (i->buf.allocation_failure)
+ ret = -ENOMEM;
+
+ if (!ret)
+ ret = flush_buf(i);
+
+ return ret ?: i->ret;
+}
+
+static const struct file_operations btree_transactions_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_btree_transactions_read,
+};
+#endif /* CONFIG_BCACHEFS_DEBUG_TRANSACTIONS */
+
+static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ bool done = false;
+ int err;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ do {
+ err = flush_buf(i);
+ if (err)
+ return err;
+
+ if (!i->size)
+ break;
+
+ done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
+ i->iter++;
+ } while (!done);
+
+ if (i->buf.allocation_failure)
+ return -ENOMEM;
+
+ return i->ret;
+}
+
+static const struct file_operations journal_pins_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_journal_pins_read,
+};
+
+static int lock_held_stats_open(struct inode *inode, struct file *file)
+{
+ struct bch_fs *c = inode->i_private;
+ struct dump_iter *i;
+
+ i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
+
+ if (!i)
+ return -ENOMEM;
+
+ i->iter = 0;
+ i->c = c;
+ i->buf = PRINTBUF;
+ file->private_data = i;
+
+ return 0;
+}
+
+static int lock_held_stats_release(struct inode *inode, struct file *file)
+{
+ struct dump_iter *i = file->private_data;
+
+ printbuf_exit(&i->buf);
+ kfree(i);
+
+ return 0;
+}
+
+static ssize_t lock_held_stats_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ int err;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ while (1) {
+ struct btree_transaction_stats *s = &c->btree_transaction_stats[i->iter];
+
+ err = flush_buf(i);
+ if (err)
+ return err;
+
+ if (!i->size)
+ break;
+
+ if (i->iter == ARRAY_SIZE(bch2_btree_transaction_fns) ||
+ !bch2_btree_transaction_fns[i->iter])
+ break;
+
+ prt_printf(&i->buf, "%s: ", bch2_btree_transaction_fns[i->iter]);
+ prt_newline(&i->buf);
+ printbuf_indent_add(&i->buf, 2);
+
+ mutex_lock(&s->lock);
+
+ prt_printf(&i->buf, "Max mem used: %u", s->max_mem);
+ prt_newline(&i->buf);
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
+ prt_printf(&i->buf, "Lock hold times:");
+ prt_newline(&i->buf);
+
+ printbuf_indent_add(&i->buf, 2);
+ bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
+ printbuf_indent_sub(&i->buf, 2);
+ }
+
+ if (s->max_paths_text) {
+ prt_printf(&i->buf, "Maximum allocated btree paths (%u):", s->nr_max_paths);
+ prt_newline(&i->buf);
+
+ printbuf_indent_add(&i->buf, 2);
+ prt_str_indented(&i->buf, s->max_paths_text);
+ printbuf_indent_sub(&i->buf, 2);
+ }
+
+ mutex_unlock(&s->lock);
+
+ printbuf_indent_sub(&i->buf, 2);
+ prt_newline(&i->buf);
+ i->iter++;
+ }
+
+ if (i->buf.allocation_failure)
+ return -ENOMEM;
+
+ return i->ret;
+}
+
+static const struct file_operations lock_held_stats_op = {
+ .owner = THIS_MODULE,
+ .open = lock_held_stats_open,
+ .release = lock_held_stats_release,
+ .read = lock_held_stats_read,
+};
+
+static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ struct btree_trans *trans;
+ ssize_t ret = 0;
+ u32 seq;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ if (i->iter)
+ goto out;
+restart:
+ seqmutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ if (trans->locking_wait.task->pid <= i->iter)
+ continue;
+
+ closure_get(&trans->ref);
+ seq = seqmutex_seq(&c->btree_trans_lock);
+ seqmutex_unlock(&c->btree_trans_lock);
+
+ ret = flush_buf(i);
+ if (ret) {
+ closure_put(&trans->ref);
+ goto out;
+ }
+
+ bch2_check_for_deadlock(trans, &i->buf);
+
+ i->iter = trans->locking_wait.task->pid;
+
+ closure_put(&trans->ref);
+
+ if (!seqmutex_relock(&c->btree_trans_lock, seq))
+ goto restart;
+ }
+ seqmutex_unlock(&c->btree_trans_lock);
+out:
+ if (i->buf.allocation_failure)
+ ret = -ENOMEM;
+
+ if (!ret)
+ ret = flush_buf(i);
+
+ return ret ?: i->ret;
+}
+
+static const struct file_operations btree_deadlock_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_btree_deadlock_read,
+};
+
+void bch2_fs_debug_exit(struct bch_fs *c)
+{
+ if (!IS_ERR_OR_NULL(c->fs_debug_dir))
+ debugfs_remove_recursive(c->fs_debug_dir);
+}
+
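+/*
+ * Debugfs layout created below, under the top level bcachefs directory:
+ *
+ *   <fs uuid>/cached_btree_nodes
+ *   <fs uuid>/btree_transactions	(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)
+ *   <fs uuid>/journal_pins
+ *   <fs uuid>/btree_transaction_stats
+ *   <fs uuid>/btree_deadlock
+ *   <fs uuid>/btrees/<btree>		one file per btree id, plus
+ *   <fs uuid>/btrees/<btree>-formats and <btree>-bfloat-failed
+ */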
+void bch2_fs_debug_init(struct bch_fs *c)
+{
+ struct btree_debug *bd;
+ char name[100];
+
+ if (IS_ERR_OR_NULL(bch_debug))
+ return;
+
+ snprintf(name, sizeof(name), "%pU", c->sb.user_uuid.b);
+ c->fs_debug_dir = debugfs_create_dir(name, bch_debug);
+ if (IS_ERR_OR_NULL(c->fs_debug_dir))
+ return;
+
+ debugfs_create_file("cached_btree_nodes", 0400, c->fs_debug_dir,
+ c->btree_debug, &cached_btree_nodes_ops);
+
+#ifdef CONFIG_BCACHEFS_DEBUG_TRANSACTIONS
+ debugfs_create_file("btree_transactions", 0400, c->fs_debug_dir,
+ c->btree_debug, &btree_transactions_ops);
+#endif
+
+ debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
+ c->btree_debug, &journal_pins_ops);
+
+ debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
+ c, &lock_held_stats_op);
+
+ debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
+ c->btree_debug, &btree_deadlock_ops);
+
+ c->btree_debug_dir = debugfs_create_dir("btrees", c->fs_debug_dir);
+ if (IS_ERR_OR_NULL(c->btree_debug_dir))
+ return;
+
+ for (bd = c->btree_debug;
+ bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
+ bd++) {
+ bd->id = bd - c->btree_debug;
+ debugfs_create_file(bch2_btree_ids[bd->id],
+ 0400, c->btree_debug_dir, bd,
+ &btree_debug_ops);
+
+ snprintf(name, sizeof(name), "%s-formats",
+ bch2_btree_ids[bd->id]);
+
+ debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
+ &btree_format_debug_ops);
+
+ snprintf(name, sizeof(name), "%s-bfloat-failed",
+ bch2_btree_ids[bd->id]);
+
+ debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
+ &bfloat_failed_debug_ops);
+ }
+}
+
+#endif
+
+void bch2_debug_exit(void)
+{
+ if (!IS_ERR_OR_NULL(bch_debug))
+ debugfs_remove_recursive(bch_debug);
+}
+
+int __init bch2_debug_init(void)
+{
+	bch_debug = debugfs_create_dir("bcachefs", NULL);
+	return 0;
+}
diff --git a/fs/bcachefs/debug.h b/fs/bcachefs/debug.h
new file mode 100644
index 000000000000..2c37143b5fd1
--- /dev/null
+++ b/fs/bcachefs/debug.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DEBUG_H
+#define _BCACHEFS_DEBUG_H
+
+#include "bcachefs.h"
+
+struct bio;
+struct btree;
+struct bch_fs;
+
+void __bch2_btree_verify(struct bch_fs *, struct btree *);
+void bch2_btree_node_ondisk_to_text(struct printbuf *, struct bch_fs *,
+ const struct btree *);
+
+static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)
+{
+ if (bch2_verify_btree_ondisk)
+ __bch2_btree_verify(c, b);
+}
+
+#ifdef CONFIG_DEBUG_FS
+void bch2_fs_debug_exit(struct bch_fs *);
+void bch2_fs_debug_init(struct bch_fs *);
+#else
+static inline void bch2_fs_debug_exit(struct bch_fs *c) {}
+static inline void bch2_fs_debug_init(struct bch_fs *c) {}
+#endif
+
+void bch2_debug_exit(void);
+int bch2_debug_init(void);
+
+#endif /* _BCACHEFS_DEBUG_H */
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
new file mode 100644
index 000000000000..6c6c8d57d72b
--- /dev/null
+++ b/fs/bcachefs/dirent.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "bkey_methods.h"
+#include "btree_update.h"
+#include "extents.h"
+#include "dirent.h"
+#include "fs.h"
+#include "keylist.h"
+#include "str_hash.h"
+#include "subvolume.h"
+
+#include <linux/dcache.h>
+
+static unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
+{
+ unsigned bkey_u64s = bkey_val_u64s(d.k);
+ unsigned bkey_bytes = bkey_u64s * sizeof(u64);
+ u64 last_u64 = ((u64*)d.v)[bkey_u64s - 1];
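+	/*
+	 * Names are NUL padded out to u64 alignment; count the padding in the
+	 * last u64 of the value.  On little endian machines the trailing
+	 * bytes of the name occupy the most significant byte positions (hence
+	 * clz), on big endian the least significant (hence ctz).
+	 */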
+#if CPU_BIG_ENDIAN
+ unsigned trailing_nuls = last_u64 ? __builtin_ctzll(last_u64) / 8 : 64 / 8;
+#else
+ unsigned trailing_nuls = last_u64 ? __builtin_clzll(last_u64) / 8 : 64 / 8;
+#endif
+
+ return bkey_bytes -
+ offsetof(struct bch_dirent, d_name) -
+ trailing_nuls;
+}
+
+struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d)
+{
+ return (struct qstr) QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
+}
+
+static u64 bch2_dirent_hash(const struct bch_hash_info *info,
+ const struct qstr *name)
+{
+ struct bch_str_hash_ctx ctx;
+
+ bch2_str_hash_init(&ctx, info);
+ bch2_str_hash_update(&ctx, info, name->name, name->len);
+
+ /* [0,2) reserved for dots */
+ return max_t(u64, bch2_str_hash_end(&ctx, info), 2);
+}
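+
+/*
+ * Illustrative note (not in the original): because of the max_t() clamp
+ * above, a name whose raw hash comes out as 0 or 1 is stored at offset 2,
+ * so offsets 0 and 1 are never occupied by a real name.
+ */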
+
+static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
+{
+ return bch2_dirent_hash(info, key);
+}
+
+static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
+{
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
+ struct qstr name = bch2_dirent_get_name(d);
+
+ return bch2_dirent_hash(info, &name);
+}
+
+static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
+{
+ struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
+ const struct qstr l_name = bch2_dirent_get_name(l);
+ const struct qstr *r_name = _r;
+
+ return l_name.len - r_name->len ?: memcmp(l_name.name, r_name->name, l_name.len);
+}
+
+static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
+{
+ struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
+ struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
+ const struct qstr l_name = bch2_dirent_get_name(l);
+ const struct qstr r_name = bch2_dirent_get_name(r);
+
+ return l_name.len - r_name.len ?: memcmp(l_name.name, r_name.name, l_name.len);
+}
+
+static bool dirent_is_visible(subvol_inum inum, struct bkey_s_c k)
+{
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
+
+ if (d.v->d_type == DT_SUBVOL)
+ return le32_to_cpu(d.v->d_parent_subvol) == inum.subvol;
+ return true;
+}
+
+const struct bch_hash_desc bch2_dirent_hash_desc = {
+ .btree_id = BTREE_ID_dirents,
+ .key_type = KEY_TYPE_dirent,
+ .hash_key = dirent_hash_key,
+ .hash_bkey = dirent_hash_bkey,
+ .cmp_key = dirent_cmp_key,
+ .cmp_bkey = dirent_cmp_bkey,
+ .is_visible = dirent_is_visible,
+};
+
+int bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
+ struct qstr d_name = bch2_dirent_get_name(d);
+
+ if (!d_name.len) {
+ prt_printf(err, "empty name");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len)) {
+ prt_printf(err, "value too big (%zu > %u)",
+ bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+	/*
+	 * Check that new keys don't exceed the max length
+	 * (older keys may be larger).
+	 */
+ if ((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX) {
+ prt_printf(err, "dirent name too big (%u > %u)",
+ d_name.len, BCH_NAME_MAX);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (d_name.len != strnlen(d_name.name, d_name.len)) {
+ prt_printf(err, "dirent has stray data after name's NUL");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+	if (d_name.len == 1 && !memcmp(d_name.name, ".", 1)) {
+		prt_printf(err, "invalid name: \".\"");
+		return -BCH_ERR_invalid_bkey;
+	}
+
+	if (d_name.len == 2 && !memcmp(d_name.name, "..", 2)) {
+		prt_printf(err, "invalid name: \"..\"");
+		return -BCH_ERR_invalid_bkey;
+	}
+
+	if (memchr(d_name.name, '/', d_name.len)) {
+		prt_printf(err, "invalid name: contains '/'");
+		return -BCH_ERR_invalid_bkey;
+	}
+
+ if (d.v->d_type != DT_SUBVOL &&
+ le64_to_cpu(d.v->d_inum) == d.k->p.inode) {
+ prt_printf(err, "dirent points to own directory");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
+ struct qstr d_name = bch2_dirent_get_name(d);
+
+ prt_printf(out, "%.*s -> %llu type %s",
+ d_name.len,
+ d_name.name,
+ d.v->d_type != DT_SUBVOL
+ ? le64_to_cpu(d.v->d_inum)
+ : le32_to_cpu(d.v->d_child_subvol),
+ bch2_d_type_str(d.v->d_type));
+}
+
+static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
+ subvol_inum dir, u8 type,
+ const struct qstr *name, u64 dst)
+{
+ struct bkey_i_dirent *dirent;
+ unsigned u64s = BKEY_U64s + dirent_val_u64s(name->len);
+
+ if (name->len > BCH_NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ BUG_ON(u64s > U8_MAX);
+
+ dirent = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
+ if (IS_ERR(dirent))
+ return dirent;
+
+ bkey_dirent_init(&dirent->k_i);
+ dirent->k.u64s = u64s;
+
+ if (type != DT_SUBVOL) {
+ dirent->v.d_inum = cpu_to_le64(dst);
+ } else {
+ dirent->v.d_parent_subvol = cpu_to_le32(dir.subvol);
+ dirent->v.d_child_subvol = cpu_to_le32(dst);
+ }
+
+ dirent->v.d_type = type;
+
+ memcpy(dirent->v.d_name, name->name, name->len);
+ memset(dirent->v.d_name + name->len, 0,
+ bkey_val_bytes(&dirent->k) -
+ offsetof(struct bch_dirent, d_name) -
+ name->len);
+
+ EBUG_ON(bch2_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len);
+
+ return dirent;
+}
+
+int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
+ const struct bch_hash_info *hash_info,
+ u8 type, const struct qstr *name, u64 dst_inum,
+ u64 *dir_offset, int flags)
+{
+ struct bkey_i_dirent *dirent;
+ int ret;
+
+ dirent = dirent_create_key(trans, dir, type, name, dst_inum);
+ ret = PTR_ERR_OR_ZERO(dirent);
+ if (ret)
+ return ret;
+
+ ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
+ dir, &dirent->k_i, flags);
+ *dir_offset = dirent->k.p.offset;
+
+ return ret;
+}
+
+static void dirent_copy_target(struct bkey_i_dirent *dst,
+ struct bkey_s_c_dirent src)
+{
+ dst->v.d_inum = src.v->d_inum;
+ dst->v.d_type = src.v->d_type;
+}
+
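+/*
+ * Resolve a dirent to the subvol_inum it points at.  Returns 0 on success,
+ * a positive value if the dirent is a subvolume root that isn't visible
+ * from @dir's subvolume (callers skip it or turn it into -ENOENT), or a
+ * negative error code.
+ */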
+int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
+ struct bkey_s_c_dirent d, subvol_inum *target)
+{
+ struct bch_subvolume s;
+ int ret = 0;
+
+ if (d.v->d_type == DT_SUBVOL &&
+ le32_to_cpu(d.v->d_parent_subvol) != dir.subvol)
+ return 1;
+
+ if (likely(d.v->d_type != DT_SUBVOL)) {
+ target->subvol = dir.subvol;
+ target->inum = le64_to_cpu(d.v->d_inum);
+ } else {
+ target->subvol = le32_to_cpu(d.v->d_child_subvol);
+
+ ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_CACHED, &s);
+
+ target->inum = le64_to_cpu(s.inode);
+ }
+
+ return ret;
+}
+
+int bch2_dirent_rename(struct btree_trans *trans,
+ subvol_inum src_dir, struct bch_hash_info *src_hash,
+ subvol_inum dst_dir, struct bch_hash_info *dst_hash,
+ const struct qstr *src_name, subvol_inum *src_inum, u64 *src_offset,
+ const struct qstr *dst_name, subvol_inum *dst_inum, u64 *dst_offset,
+ enum bch_rename_mode mode)
+{
+ struct btree_iter src_iter = { NULL };
+ struct btree_iter dst_iter = { NULL };
+ struct bkey_s_c old_src, old_dst = bkey_s_c_null;
+ struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
+ struct bpos dst_pos =
+ POS(dst_dir.inum, bch2_dirent_hash(dst_hash, dst_name));
+ unsigned src_type = 0, dst_type = 0, src_update_flags = 0;
+ int ret = 0;
+
+ if (src_dir.subvol != dst_dir.subvol)
+ return -EXDEV;
+
+ memset(src_inum, 0, sizeof(*src_inum));
+ memset(dst_inum, 0, sizeof(*dst_inum));
+
+ /* Lookup src: */
+ ret = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
+ src_hash, src_dir, src_name,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto out;
+
+ old_src = bch2_btree_iter_peek_slot(&src_iter);
+ ret = bkey_err(old_src);
+ if (ret)
+ goto out;
+
+ ret = bch2_dirent_read_target(trans, src_dir,
+ bkey_s_c_to_dirent(old_src), src_inum);
+ if (ret)
+ goto out;
+
+ src_type = bkey_s_c_to_dirent(old_src).v->d_type;
+
+ if (src_type == DT_SUBVOL && mode == BCH_RENAME_EXCHANGE)
+ return -EOPNOTSUPP;
+
+ /* Lookup dst: */
+ if (mode == BCH_RENAME) {
+ /*
+ * Note that we're _not_ checking if the target already exists -
+ * we're relying on the VFS to do that check for us for
+ * correctness:
+ */
+ ret = bch2_hash_hole(trans, &dst_iter, bch2_dirent_hash_desc,
+ dst_hash, dst_dir, dst_name);
+ if (ret)
+ goto out;
+ } else {
+ ret = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
+ dst_hash, dst_dir, dst_name,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto out;
+
+ old_dst = bch2_btree_iter_peek_slot(&dst_iter);
+ ret = bkey_err(old_dst);
+ if (ret)
+ goto out;
+
+ ret = bch2_dirent_read_target(trans, dst_dir,
+ bkey_s_c_to_dirent(old_dst), dst_inum);
+ if (ret)
+ goto out;
+
+ dst_type = bkey_s_c_to_dirent(old_dst).v->d_type;
+
+ if (dst_type == DT_SUBVOL)
+ return -EOPNOTSUPP;
+ }
+
+ if (mode != BCH_RENAME_EXCHANGE)
+ *src_offset = dst_iter.pos.offset;
+
+ /* Create new dst key: */
+ new_dst = dirent_create_key(trans, dst_dir, 0, dst_name, 0);
+ ret = PTR_ERR_OR_ZERO(new_dst);
+ if (ret)
+ goto out;
+
+ dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
+ new_dst->k.p = dst_iter.pos;
+
+ /* Create new src key: */
+ if (mode == BCH_RENAME_EXCHANGE) {
+ new_src = dirent_create_key(trans, src_dir, 0, src_name, 0);
+ ret = PTR_ERR_OR_ZERO(new_src);
+ if (ret)
+ goto out;
+
+ dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
+ new_src->k.p = src_iter.pos;
+ } else {
+ new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+ ret = PTR_ERR_OR_ZERO(new_src);
+ if (ret)
+ goto out;
+
+ bkey_init(&new_src->k);
+ new_src->k.p = src_iter.pos;
+
+ if (bkey_le(dst_pos, src_iter.pos) &&
+ bkey_lt(src_iter.pos, dst_iter.pos)) {
+ /*
+ * We have a hash collision for the new dst key,
+ * and new_src - the key we're deleting - is between
+ * new_dst's hashed slot and the slot we're going to be
+ * inserting it into - oops. This will break the hash
+ * table if we don't deal with it:
+ */
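+			/*
+			 * Worked example with hypothetical numbers: new_dst
+			 * hashes to slot 10 (dst_pos), src currently sits at
+			 * slot 12, and the free slot found for new_dst is 15
+			 * (dst_iter.pos).  Deleting slot 12 outright would cut
+			 * off linear probing from slot 10 before it reaches 15.
+			 */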
+ if (mode == BCH_RENAME) {
+ /*
+ * If we're not overwriting, we can just insert
+ * new_dst at the src position:
+ */
+ new_src = new_dst;
+ new_src->k.p = src_iter.pos;
+ goto out_set_src;
+ } else {
+				/*
+				 * If we're overwriting, we can't insert new_dst
+				 * at a different slot because it has to
+				 * overwrite old_dst - just make sure to use a
+				 * whiteout when deleting src:
+				 */
+ new_src->k.type = KEY_TYPE_hash_whiteout;
+ }
+ } else {
+ /* Check if we need a whiteout to delete src: */
+ ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
+ src_hash, &src_iter);
+ if (ret < 0)
+ goto out;
+
+ if (ret)
+ new_src->k.type = KEY_TYPE_hash_whiteout;
+ }
+ }
+
+ ret = bch2_trans_update(trans, &dst_iter, &new_dst->k_i, 0);
+ if (ret)
+ goto out;
+out_set_src:
+
+ /*
+ * If we're deleting a subvolume, we need to really delete the dirent,
+ * not just emit a whiteout in the current snapshot:
+ */
+ if (src_type == DT_SUBVOL) {
+ bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(&src_iter);
+ if (ret)
+ goto out;
+
+ new_src->k.p = src_iter.pos;
+ src_update_flags |= BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE;
+ }
+
+ ret = bch2_trans_update(trans, &src_iter, &new_src->k_i, src_update_flags);
+ if (ret)
+ goto out;
+
+ if (mode == BCH_RENAME_EXCHANGE)
+ *src_offset = new_src->k.p.offset;
+ *dst_offset = new_dst->k.p.offset;
+out:
+ bch2_trans_iter_exit(trans, &src_iter);
+ bch2_trans_iter_exit(trans, &dst_iter);
+ return ret;
+}
+
+int __bch2_dirent_lookup_trans(struct btree_trans *trans,
+ struct btree_iter *iter,
+ subvol_inum dir,
+ const struct bch_hash_info *hash_info,
+ const struct qstr *name, subvol_inum *inum,
+ unsigned flags)
+{
+ struct bkey_s_c k;
+ struct bkey_s_c_dirent d;
+ u32 snapshot;
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot);
+ if (ret)
+ return ret;
+
+ ret = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
+ hash_info, dir, name, flags);
+ if (ret)
+ return ret;
+
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ d = bkey_s_c_to_dirent(k);
+
+ ret = bch2_dirent_read_target(trans, dir, d, inum);
+ if (ret > 0)
+ ret = -ENOENT;
+err:
+ if (ret)
+ bch2_trans_iter_exit(trans, iter);
+
+ return ret;
+}
+
+u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
+ const struct bch_hash_info *hash_info,
+ const struct qstr *name, subvol_inum *inum)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ int ret;
+retry:
+ bch2_trans_begin(trans);
+
+ ret = __bch2_dirent_lookup_trans(trans, &iter, dir, hash_info,
+ name, inum, 0);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ if (!ret)
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot;
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot);
+ if (ret)
+ return ret;
+
+ for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
+ SPOS(dir.inum, 0, snapshot),
+ POS(dir.inum, U64_MAX), 0, k, ret)
+ if (k.k->type == KEY_TYPE_dirent) {
+ ret = -ENOTEMPTY;
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_dirent dirent;
+ subvol_inum target;
+ u32 snapshot;
+ struct bkey_buf sk;
+ struct qstr name;
+ int ret;
+
+ bch2_bkey_buf_init(&sk);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents,
+ SPOS(inum.inum, ctx->pos, snapshot),
+ POS(inum.inum, U64_MAX), 0, k, ret) {
+ if (k.k->type != KEY_TYPE_dirent)
+ continue;
+
+ dirent = bkey_s_c_to_dirent(k);
+
+ ret = bch2_dirent_read_target(trans, inum, dirent, &target);
+ if (ret < 0)
+ break;
+ if (ret)
+ continue;
+
+		/*
+		 * dir_emit() can fault and block, so copy the dirent out and
+		 * drop btree locks before calling it:
+		 */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ dirent = bkey_i_to_s_c_dirent(sk.k);
+ bch2_trans_unlock(trans);
+
+ name = bch2_dirent_get_name(dirent);
+
+ ctx->pos = dirent.k->p.offset;
+ if (!dir_emit(ctx, name.name,
+ name.len,
+ target.inum,
+ vfs_d_type(dirent.v->d_type)))
+ break;
+ ctx->pos = dirent.k->p.offset + 1;
+
+		/*
+		 * bch2_dirent_read_target() looks up subvolumes; a directory
+		 * with many subvolumes in it can overflow the transaction's
+		 * btree paths, so check for that here:
+		 */
+ ret = btree_trans_too_many_iters(trans);
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&sk, c);
+
+ return ret;
+}
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
new file mode 100644
index 000000000000..e9fa1df38232
--- /dev/null
+++ b/fs/bcachefs/dirent.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DIRENT_H
+#define _BCACHEFS_DIRENT_H
+
+#include "str_hash.h"
+
+enum bkey_invalid_flags;
+extern const struct bch_hash_desc bch2_dirent_hash_desc;
+
+int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_dirent ((struct bkey_ops) { \
+ .key_invalid = bch2_dirent_invalid, \
+ .val_to_text = bch2_dirent_to_text, \
+ .min_val_size = 16, \
+})
+
+struct qstr;
+struct file;
+struct dir_context;
+struct bch_fs;
+struct bch_hash_info;
+struct bch_inode_info;
+
+struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d);
+
+static inline unsigned dirent_val_u64s(unsigned len)
+{
+ return DIV_ROUND_UP(offsetof(struct bch_dirent, d_name) + len,
+ sizeof(u64));
+}
+
+int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
+ struct bkey_s_c_dirent, subvol_inum *);
+
+int bch2_dirent_create(struct btree_trans *, subvol_inum,
+ const struct bch_hash_info *, u8,
+ const struct qstr *, u64, u64 *, int);
+
+static inline unsigned vfs_d_type(unsigned type)
+{
+ return type == DT_SUBVOL ? DT_DIR : type;
+}
+
+enum bch_rename_mode {
+ BCH_RENAME,
+ BCH_RENAME_OVERWRITE,
+ BCH_RENAME_EXCHANGE,
+};
+
+int bch2_dirent_rename(struct btree_trans *,
+ subvol_inum, struct bch_hash_info *,
+ subvol_inum, struct bch_hash_info *,
+ const struct qstr *, subvol_inum *, u64 *,
+ const struct qstr *, subvol_inum *, u64 *,
+ enum bch_rename_mode);
+
+int __bch2_dirent_lookup_trans(struct btree_trans *, struct btree_iter *,
+ subvol_inum, const struct bch_hash_info *,
+ const struct qstr *, subvol_inum *, unsigned);
+u64 bch2_dirent_lookup(struct bch_fs *, subvol_inum,
+ const struct bch_hash_info *,
+ const struct qstr *, subvol_inum *);
+
+int bch2_empty_dir_trans(struct btree_trans *, subvol_inum);
+int bch2_readdir(struct bch_fs *, subvol_inum, struct dir_context *);
+
+#endif /* _BCACHEFS_DIRENT_H */
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
new file mode 100644
index 000000000000..e00133b6ea51
--- /dev/null
+++ b/fs/bcachefs/disk_groups.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "disk_groups.h"
+#include "sb-members.h"
+#include "super-io.h"
+
+#include <linux/sort.h>
+
+static int group_cmp(const void *_l, const void *_r)
+{
+ const struct bch_disk_group *l = _l;
+ const struct bch_disk_group *r = _r;
+
+ return ((BCH_GROUP_DELETED(l) > BCH_GROUP_DELETED(r)) -
+ (BCH_GROUP_DELETED(l) < BCH_GROUP_DELETED(r))) ?:
+ ((BCH_GROUP_PARENT(l) > BCH_GROUP_PARENT(r)) -
+ (BCH_GROUP_PARENT(l) < BCH_GROUP_PARENT(r))) ?:
+ strncmp(l->label, r->label, sizeof(l->label));
+}
+
+static int bch2_sb_disk_groups_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_disk_groups *groups =
+ field_to_type(f, disk_groups);
+ struct bch_disk_group *g, *sorted = NULL;
+ unsigned nr_groups = disk_groups_nr(groups);
+ unsigned i, len;
+ int ret = 0;
+
+ for (i = 0; i < sb->nr_devices; i++) {
+ struct bch_member m = bch2_sb_member_get(sb, i);
+ unsigned group_id;
+
+ if (!BCH_MEMBER_GROUP(&m))
+ continue;
+
+ group_id = BCH_MEMBER_GROUP(&m) - 1;
+
+ if (group_id >= nr_groups) {
+ prt_printf(err, "disk %u has invalid label %u (have %u)",
+ i, group_id, nr_groups);
+ return -BCH_ERR_invalid_sb_disk_groups;
+ }
+
+ if (BCH_GROUP_DELETED(&groups->entries[group_id])) {
+ prt_printf(err, "disk %u has deleted label %u", i, group_id);
+ return -BCH_ERR_invalid_sb_disk_groups;
+ }
+ }
+
+ if (!nr_groups)
+ return 0;
+
+ for (i = 0; i < nr_groups; i++) {
+ g = groups->entries + i;
+
+ if (BCH_GROUP_DELETED(g))
+ continue;
+
+ len = strnlen(g->label, sizeof(g->label));
+ if (!len) {
+ prt_printf(err, "label %u empty", i);
+ return -BCH_ERR_invalid_sb_disk_groups;
+ }
+ }
+
+ sorted = kmalloc_array(nr_groups, sizeof(*sorted), GFP_KERNEL);
+ if (!sorted)
+ return -BCH_ERR_ENOMEM_disk_groups_validate;
+
+ memcpy(sorted, groups->entries, nr_groups * sizeof(*sorted));
+ sort(sorted, nr_groups, sizeof(*sorted), group_cmp, NULL);
+
+ for (g = sorted; g + 1 < sorted + nr_groups; g++)
+ if (!BCH_GROUP_DELETED(g) &&
+ !group_cmp(&g[0], &g[1])) {
+ prt_printf(err, "duplicate label %llu.%.*s",
+ BCH_GROUP_PARENT(g),
+ (int) sizeof(g->label), g->label);
+ ret = -BCH_ERR_invalid_sb_disk_groups;
+ goto err;
+ }
+err:
+ kfree(sorted);
+ return ret;
+}
+
+void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct bch_disk_groups_cpu *g;
+ struct bch_dev *ca;
+ int i;
+ unsigned iter;
+
+ out->atomic++;
+ rcu_read_lock();
+
+ g = rcu_dereference(c->disk_groups);
+ if (!g)
+ goto out;
+
+ for (i = 0; i < g->nr; i++) {
+ if (i)
+ prt_printf(out, " ");
+
+ if (g->entries[i].deleted) {
+ prt_printf(out, "[deleted]");
+ continue;
+ }
+
+ prt_printf(out, "[parent %d devs", g->entries[i].parent);
+ for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
+ prt_printf(out, " %s", ca->name);
+ prt_printf(out, "]");
+ }
+
+out:
+ rcu_read_unlock();
+ out->atomic--;
+}
+
+static void bch2_sb_disk_groups_to_text(struct printbuf *out,
+ struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_disk_groups *groups =
+ field_to_type(f, disk_groups);
+ struct bch_disk_group *g;
+ unsigned nr_groups = disk_groups_nr(groups);
+
+ for (g = groups->entries;
+ g < groups->entries + nr_groups;
+ g++) {
+ if (g != groups->entries)
+ prt_printf(out, " ");
+
+ if (BCH_GROUP_DELETED(g))
+ prt_printf(out, "[deleted]");
+ else
+ prt_printf(out, "[parent %llu name %s]",
+ BCH_GROUP_PARENT(g), g->label);
+ }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_disk_groups = {
+ .validate = bch2_sb_disk_groups_validate,
+ .to_text = bch2_sb_disk_groups_to_text
+};
+
+int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
+{
+ struct bch_sb_field_disk_groups *groups;
+ struct bch_disk_groups_cpu *cpu_g, *old_g;
+ unsigned i, g, nr_groups;
+
+ lockdep_assert_held(&c->sb_lock);
+
+ groups = bch2_sb_field_get(c->disk_sb.sb, disk_groups);
+ nr_groups = disk_groups_nr(groups);
+
+ if (!groups)
+ return 0;
+
+ cpu_g = kzalloc(struct_size(cpu_g, entries, nr_groups), GFP_KERNEL);
+ if (!cpu_g)
+ return -BCH_ERR_ENOMEM_disk_groups_to_cpu;
+
+ cpu_g->nr = nr_groups;
+
+ for (i = 0; i < nr_groups; i++) {
+ struct bch_disk_group *src = &groups->entries[i];
+ struct bch_disk_group_cpu *dst = &cpu_g->entries[i];
+
+ dst->deleted = BCH_GROUP_DELETED(src);
+ dst->parent = BCH_GROUP_PARENT(src);
+ }
+
+ for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
+ struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, i);
+ struct bch_disk_group_cpu *dst;
+
+ if (!bch2_member_exists(&m))
+ continue;
+
+ g = BCH_MEMBER_GROUP(&m);
+ while (g) {
+ dst = &cpu_g->entries[g - 1];
+ __set_bit(i, dst->devs.d);
+ g = dst->parent;
+ }
+ }
+
+ old_g = rcu_dereference_protected(c->disk_groups,
+ lockdep_is_held(&c->sb_lock));
+ rcu_assign_pointer(c->disk_groups, cpu_g);
+ if (old_g)
+ kfree_rcu(old_g, rcu);
+
+ return 0;
+}
+
+const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
+{
+ struct target t = target_decode(target);
+ struct bch_devs_mask *devs;
+
+ rcu_read_lock();
+
+ switch (t.type) {
+ case TARGET_NULL:
+ devs = NULL;
+ break;
+ case TARGET_DEV: {
+ struct bch_dev *ca = t.dev < c->sb.nr_devices
+ ? rcu_dereference(c->devs[t.dev])
+ : NULL;
+ devs = ca ? &ca->self : NULL;
+ break;
+ }
+ case TARGET_GROUP: {
+ struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
+
+ devs = g && t.group < g->nr && !g->entries[t.group].deleted
+ ? &g->entries[t.group].devs
+ : NULL;
+ break;
+ }
+ default:
+ BUG();
+ }
+
+ rcu_read_unlock();
+
+ return devs;
+}
+
+bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
+{
+ struct target t = target_decode(target);
+
+ switch (t.type) {
+ case TARGET_NULL:
+ return false;
+ case TARGET_DEV:
+ return dev == t.dev;
+ case TARGET_GROUP: {
+ struct bch_disk_groups_cpu *g;
+ const struct bch_devs_mask *m;
+ bool ret;
+
+ rcu_read_lock();
+ g = rcu_dereference(c->disk_groups);
+ m = g && t.group < g->nr && !g->entries[t.group].deleted
+ ? &g->entries[t.group].devs
+ : NULL;
+
+ ret = m ? test_bit(dev, m->d) : false;
+ rcu_read_unlock();
+
+ return ret;
+ }
+ default:
+ BUG();
+ }
+}
+
+static int __bch2_disk_group_find(struct bch_sb_field_disk_groups *groups,
+ unsigned parent,
+ const char *name, unsigned namelen)
+{
+ unsigned i, nr_groups = disk_groups_nr(groups);
+
+ if (!namelen || namelen > BCH_SB_LABEL_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < nr_groups; i++) {
+ struct bch_disk_group *g = groups->entries + i;
+
+ if (BCH_GROUP_DELETED(g))
+ continue;
+
+		if (BCH_GROUP_PARENT(g) == parent &&
+		    strnlen(g->label, sizeof(g->label)) == namelen &&
+		    !memcmp(name, g->label, namelen))
+ return i;
+ }
+
+ return -1;
+}
+
+static int __bch2_disk_group_add(struct bch_sb_handle *sb, unsigned parent,
+ const char *name, unsigned namelen)
+{
+ struct bch_sb_field_disk_groups *groups =
+ bch2_sb_field_get(sb->sb, disk_groups);
+ unsigned i, nr_groups = disk_groups_nr(groups);
+ struct bch_disk_group *g;
+
+ if (!namelen || namelen > BCH_SB_LABEL_SIZE)
+ return -EINVAL;
+
+ for (i = 0;
+ i < nr_groups && !BCH_GROUP_DELETED(&groups->entries[i]);
+ i++)
+ ;
+
+ if (i == nr_groups) {
+ unsigned u64s =
+ (sizeof(struct bch_sb_field_disk_groups) +
+ sizeof(struct bch_disk_group) * (nr_groups + 1)) /
+ sizeof(u64);
+
+ groups = bch2_sb_field_resize(sb, disk_groups, u64s);
+ if (!groups)
+ return -BCH_ERR_ENOSPC_disk_label_add;
+
+ nr_groups = disk_groups_nr(groups);
+ }
+
+ BUG_ON(i >= nr_groups);
+
+ g = &groups->entries[i];
+
+ memcpy(g->label, name, namelen);
+ if (namelen < sizeof(g->label))
+ g->label[namelen] = '\0';
+ SET_BCH_GROUP_DELETED(g, 0);
+ SET_BCH_GROUP_PARENT(g, parent);
+ SET_BCH_GROUP_DATA_ALLOWED(g, ~0);
+
+ return i;
+}
+
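+/*
+ * Disk labels form a dotted hierarchy: "ssd.fast" names the group "fast"
+ * whose parent is the top level group "ssd".  Parents are stored as
+ * index + 1, with 0 meaning no parent - which is why the lookup below
+ * passes v + 1 as the parent when descending to the next path component.
+ */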
+int bch2_disk_path_find(struct bch_sb_handle *sb, const char *name)
+{
+ struct bch_sb_field_disk_groups *groups =
+ bch2_sb_field_get(sb->sb, disk_groups);
+ int v = -1;
+
+ do {
+ const char *next = strchrnul(name, '.');
+ unsigned len = next - name;
+
+ if (*next == '.')
+ next++;
+
+ v = __bch2_disk_group_find(groups, v + 1, name, len);
+ name = next;
+ } while (*name && v >= 0);
+
+ return v;
+}
+
+int bch2_disk_path_find_or_create(struct bch_sb_handle *sb, const char *name)
+{
+ struct bch_sb_field_disk_groups *groups;
+ unsigned parent = 0;
+ int v = -1;
+
+ do {
+ const char *next = strchrnul(name, '.');
+ unsigned len = next - name;
+
+ if (*next == '.')
+ next++;
+
+ groups = bch2_sb_field_get(sb->sb, disk_groups);
+
+ v = __bch2_disk_group_find(groups, parent, name, len);
+ if (v < 0)
+ v = __bch2_disk_group_add(sb, parent, name, len);
+ if (v < 0)
+ return v;
+
+ parent = v + 1;
+ name = next;
+ } while (*name && v >= 0);
+
+ return v;
+}
+
+void bch2_disk_path_to_text(struct printbuf *out, struct bch_sb *sb, unsigned v)
+{
+ struct bch_sb_field_disk_groups *groups =
+ bch2_sb_field_get(sb, disk_groups);
+ struct bch_disk_group *g;
+ unsigned nr = 0;
+ u16 path[32];
+
+ while (1) {
+ if (nr == ARRAY_SIZE(path))
+ goto inval;
+
+ if (v >= disk_groups_nr(groups))
+ goto inval;
+
+ g = groups->entries + v;
+
+ if (BCH_GROUP_DELETED(g))
+ goto inval;
+
+ path[nr++] = v;
+
+ if (!BCH_GROUP_PARENT(g))
+ break;
+
+ v = BCH_GROUP_PARENT(g) - 1;
+ }
+
+ while (nr) {
+ v = path[--nr];
+ g = groups->entries + v;
+
+ prt_printf(out, "%.*s", (int) sizeof(g->label), g->label);
+ if (nr)
+ prt_printf(out, ".");
+ }
+ return;
+inval:
+ prt_printf(out, "invalid label %u", v);
+}
+
+int __bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
+{
+ struct bch_member *mi;
+ int ret, v = -1;
+
+ if (!strlen(name) || !strcmp(name, "none"))
+ return 0;
+
+ v = bch2_disk_path_find_or_create(&c->disk_sb, name);
+ if (v < 0)
+ return v;
+
+ ret = bch2_sb_disk_groups_to_cpu(c);
+ if (ret)
+ return ret;
+
+ mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ SET_BCH_MEMBER_GROUP(mi, v + 1);
+ return 0;
+}
+
+int bch2_dev_group_set(struct bch_fs *c, struct bch_dev *ca, const char *name)
+{
+ int ret;
+
+ mutex_lock(&c->sb_lock);
+ ret = __bch2_dev_group_set(c, ca, name) ?:
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ return ret;
+}
+
+int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
+ struct printbuf *err)
+{
+ struct bch_dev *ca;
+ int g;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!c)
+ return 0;
+
+ if (!strlen(val) || !strcmp(val, "none")) {
+ *res = 0;
+ return 0;
+ }
+
+ /* Is it a device? */
+ ca = bch2_dev_lookup(c, val);
+ if (!IS_ERR(ca)) {
+ *res = dev_to_target(ca->dev_idx);
+ percpu_ref_put(&ca->ref);
+ return 0;
+ }
+
+ mutex_lock(&c->sb_lock);
+ g = bch2_disk_path_find(&c->disk_sb, val);
+ mutex_unlock(&c->sb_lock);
+
+ if (g >= 0) {
+ *res = group_to_target(g);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void bch2_opt_target_to_text(struct printbuf *out,
+ struct bch_fs *c,
+ struct bch_sb *sb,
+ u64 v)
+{
+ struct target t = target_decode(v);
+
+ switch (t.type) {
+ case TARGET_NULL:
+ prt_printf(out, "none");
+ break;
+ case TARGET_DEV:
+ if (c) {
+ struct bch_dev *ca;
+
+ rcu_read_lock();
+ ca = t.dev < c->sb.nr_devices
+ ? rcu_dereference(c->devs[t.dev])
+ : NULL;
+
+ if (ca && percpu_ref_tryget(&ca->io_ref)) {
+ prt_printf(out, "/dev/%pg", ca->disk_sb.bdev);
+ percpu_ref_put(&ca->io_ref);
+ } else if (ca) {
+ prt_printf(out, "offline device %u", t.dev);
+ } else {
+ prt_printf(out, "invalid device %u", t.dev);
+ }
+
+ rcu_read_unlock();
+ } else {
+ struct bch_member m = bch2_sb_member_get(sb, t.dev);
+
+ if (bch2_dev_exists(sb, t.dev)) {
+ prt_printf(out, "Device ");
+ pr_uuid(out, m.uuid.b);
+ prt_printf(out, " (%u)", t.dev);
+ } else {
+ prt_printf(out, "Bad device %u", t.dev);
+ }
+ }
+ break;
+ case TARGET_GROUP:
+ if (c) {
+ mutex_lock(&c->sb_lock);
+ bch2_disk_path_to_text(out, c->disk_sb.sb, t.group);
+ mutex_unlock(&c->sb_lock);
+ } else {
+ bch2_disk_path_to_text(out, sb, t.group);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
diff --git a/fs/bcachefs/disk_groups.h b/fs/bcachefs/disk_groups.h
new file mode 100644
index 000000000000..bd7711767fd4
--- /dev/null
+++ b/fs/bcachefs/disk_groups.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DISK_GROUPS_H
+#define _BCACHEFS_DISK_GROUPS_H
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_disk_groups;
+
+static inline unsigned disk_groups_nr(struct bch_sb_field_disk_groups *groups)
+{
+ return groups
+ ? (vstruct_end(&groups->field) -
+ (void *) &groups->entries[0]) / sizeof(struct bch_disk_group)
+ : 0;
+}
+
+struct target {
+ enum {
+ TARGET_NULL,
+ TARGET_DEV,
+ TARGET_GROUP,
+ } type;
+ union {
+ unsigned dev;
+ unsigned group;
+ };
+};
+
+#define TARGET_DEV_START 1
+#define TARGET_GROUP_START (256 + TARGET_DEV_START)
+
+static inline u16 dev_to_target(unsigned dev)
+{
+ return TARGET_DEV_START + dev;
+}
+
+static inline u16 group_to_target(unsigned group)
+{
+ return TARGET_GROUP_START + group;
+}
+
+static inline struct target target_decode(unsigned target)
+{
+ if (target >= TARGET_GROUP_START)
+ return (struct target) {
+ .type = TARGET_GROUP,
+ .group = target - TARGET_GROUP_START
+ };
+
+ if (target >= TARGET_DEV_START)
+ return (struct target) {
+ .type = TARGET_DEV,
+			.dev = target - TARGET_DEV_START
+ };
+
+ return (struct target) { .type = TARGET_NULL };
+}
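+
+/*
+ * Encoding example (illustrative): target 0 is TARGET_NULL, targets 1..256
+ * are TARGET_DEV 0..255, and targets >= 257 are TARGET_GROUP - e.g. target
+ * 300 decodes to group 43.
+ */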
+
+const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *, unsigned);
+
+static inline struct bch_devs_mask target_rw_devs(struct bch_fs *c,
+ enum bch_data_type data_type,
+ u16 target)
+{
+ struct bch_devs_mask devs = c->rw_devs[data_type];
+ const struct bch_devs_mask *t = bch2_target_to_mask(c, target);
+
+ if (t)
+ bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);
+ return devs;
+}
+
+static inline bool bch2_target_accepts_data(struct bch_fs *c,
+ enum bch_data_type data_type,
+ u16 target)
+{
+ struct bch_devs_mask rw_devs = target_rw_devs(c, data_type, target);
+ return !bitmap_empty(rw_devs.d, BCH_SB_MEMBERS_MAX);
+}
+
+bool bch2_dev_in_target(struct bch_fs *, unsigned, unsigned);
+
+int bch2_disk_path_find(struct bch_sb_handle *, const char *);
+
+/* Exported for userspace bcachefs-tools: */
+int bch2_disk_path_find_or_create(struct bch_sb_handle *, const char *);
+
+void bch2_disk_path_to_text(struct printbuf *, struct bch_sb *, unsigned);
+
+int bch2_opt_target_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
+void bch2_opt_target_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+
+#define bch2_opt_target (struct bch_opt_fn) { \
+ .parse = bch2_opt_target_parse, \
+ .to_text = bch2_opt_target_to_text, \
+}
+
+int bch2_sb_disk_groups_to_cpu(struct bch_fs *);
+
+int __bch2_dev_group_set(struct bch_fs *, struct bch_dev *, const char *);
+int bch2_dev_group_set(struct bch_fs *, struct bch_dev *, const char *);
+
+const char *bch2_sb_validate_disk_groups(struct bch_sb *,
+ struct bch_sb_field *);
+
+void bch2_disk_groups_to_text(struct printbuf *, struct bch_fs *);
+
+#endif /* _BCACHEFS_DISK_GROUPS_H */
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
new file mode 100644
index 000000000000..8646856e4539
--- /dev/null
+++ b/fs/bcachefs/ec.c
@@ -0,0 +1,1966 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* erasure coding */
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "backpointers.h"
+#include "bkey_buf.h"
+#include "bset.h"
+#include "btree_gc.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "error.h"
+#include "io_read.h"
+#include "keylist.h"
+#include "recovery.h"
+#include "replicas.h"
+#include "super-io.h"
+#include "util.h"
+
+#include <linux/sort.h>
+
+#ifdef __KERNEL__
+
+#include <linux/raid/pq.h>
+#include <linux/raid/xor.h>
+
+static void raid5_recov(unsigned disks, unsigned failed_idx,
+ size_t size, void **data)
+{
+ unsigned i = 2, nr;
+
+ BUG_ON(failed_idx >= disks);
+
+ swap(data[0], data[failed_idx]);
+ memcpy(data[0], data[1], size);
+
+ while (i < disks) {
+ nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
+ xor_blocks(nr, size, data[0], data + i);
+ i += nr;
+ }
+
+ swap(data[0], data[failed_idx]);
+}
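+
+/*
+ * Convention for these helpers: v[] holds nd data blocks followed by np
+ * parity blocks (p, then q).  raid5_recov() above reconstructs a missing
+ * block as the XOR of all the others, which doubles as generating the p
+ * parity when failed_idx is the p block itself.
+ */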
+
+static void raid_gen(int nd, int np, size_t size, void **v)
+{
+ if (np >= 1)
+ raid5_recov(nd + np, nd, size, v);
+ if (np >= 2)
+ raid6_call.gen_syndrome(nd + np, size, v);
+ BUG_ON(np > 2);
+}
+
+static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
+{
+ switch (nr) {
+ case 0:
+ break;
+ case 1:
+ if (ir[0] < nd + 1)
+ raid5_recov(nd + 1, ir[0], size, v);
+ else
+ raid6_call.gen_syndrome(nd + np, size, v);
+ break;
+ case 2:
+ if (ir[1] < nd) {
+ /* data+data failure. */
+ raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
+ } else if (ir[0] < nd) {
+ /* data + p/q failure */
+
+ if (ir[1] == nd) /* data + p failure */
+ raid6_datap_recov(nd + np, size, ir[0], v);
+ else { /* data + q failure */
+ raid5_recov(nd + 1, ir[0], size, v);
+ raid6_call.gen_syndrome(nd + np, size, v);
+ }
+ } else {
+ raid_gen(nd, np, size, v);
+ }
+ break;
+ default:
+ BUG();
+ }
+}
+
+#else
+
+#include <raid/raid.h>
+
+#endif
+
+struct ec_bio {
+ struct bch_dev *ca;
+ struct ec_stripe_buf *buf;
+ size_t idx;
+ struct bio bio;
+};
+
+/* Stripes btree keys: */
+
+int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+
+ if (bkey_eq(k.k->p, POS_MIN)) {
+ prt_printf(err, "stripe at POS_MIN");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (k.k->p.inode) {
+ prt_printf(err, "nonzero inode field");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
+ prt_printf(err, "incorrect value size (%zu < %u)",
+ bkey_val_u64s(k.k), stripe_val_u64s(s));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return bch2_bkey_ptrs_invalid(c, k, flags, err);
+}
+
+void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+ unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
+
+ prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
+ s->algorithm,
+ le16_to_cpu(s->sectors),
+ nr_data,
+ s->nr_redundant,
+ s->csum_type,
+ 1U << s->csum_granularity_bits);
+
+ for (i = 0; i < s->nr_blocks; i++) {
+ const struct bch_extent_ptr *ptr = s->ptrs + i;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ u32 offset;
+ u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+
+ prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
+ if (i < nr_data)
+ prt_printf(out, "#%u", stripe_blockcount_get(s, i));
+ if (ptr_stale(ca, ptr))
+ prt_printf(out, " stale");
+ }
+}
+
+/* returns blocknr in stripe that we matched: */
+static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
+ struct bkey_s_c k, unsigned *block)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+ unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ for (i = 0; i < nr_data; i++)
+ if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
+ le16_to_cpu(s->sectors))) {
+ *block = i;
+ return ptr;
+ }
+
+ return NULL;
+}
+
+static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_extent: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ const union bch_extent_entry *entry;
+
+ extent_for_each_entry(e, entry)
+ if (extent_entry_type(entry) ==
+ BCH_EXTENT_ENTRY_stripe_ptr &&
+ entry->stripe_ptr.idx == idx)
+ return true;
+
+ break;
+ }
+ }
+
+ return false;
+}
+
+/* Stripe bufs: */
+
+static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
+{
+ if (buf->key.k.type == KEY_TYPE_stripe) {
+ struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
+ unsigned i;
+
+ for (i = 0; i < s->v.nr_blocks; i++) {
+ kvpfree(buf->data[i], buf->size << 9);
+ buf->data[i] = NULL;
+ }
+ }
+}
+
+/* XXX: this is a non-mempoolified memory allocation: */
+static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
+ unsigned offset, unsigned size)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned csum_granularity = 1U << v->csum_granularity_bits;
+ unsigned end = offset + size;
+ unsigned i;
+
+ BUG_ON(end > le16_to_cpu(v->sectors));
+
+ offset = round_down(offset, csum_granularity);
+ end = min_t(unsigned, le16_to_cpu(v->sectors),
+ round_up(end, csum_granularity));
+
+ buf->offset = offset;
+ buf->size = end - offset;
+
+ memset(buf->valid, 0xFF, sizeof(buf->valid));
+
+ for (i = 0; i < v->nr_blocks; i++) {
+ buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
+ if (!buf->data[i])
+ goto err;
+ }
+
+ return 0;
+err:
+ ec_stripe_buf_exit(buf);
+ return -BCH_ERR_ENOMEM_stripe_buf;
+}
+
+/* Checksumming: */
+
+static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
+ unsigned block, unsigned offset)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned csum_granularity = 1 << v->csum_granularity_bits;
+ unsigned end = buf->offset + buf->size;
+ unsigned len = min(csum_granularity, end - offset);
+
+ BUG_ON(offset >= end);
+ BUG_ON(offset < buf->offset);
+ BUG_ON(offset & (csum_granularity - 1));
+ BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
+ (len & (csum_granularity - 1)));
+
+ return bch2_checksum(NULL, v->csum_type,
+ null_nonce(),
+ buf->data[block] + ((offset - buf->offset) << 9),
+ len << 9);
+}
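+
+/*
+ * Granularity illustration (numbers hypothetical): with
+ * csum_granularity_bits = 3 each checksum covers 8 sectors, so a 128
+ * sector stripe stores 16 checksums per block; only the final chunk may be
+ * shorter.
+ */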
+
+static void ec_generate_checksums(struct ec_stripe_buf *buf)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned i, j, csums_per_device = stripe_csums_per_device(v);
+
+ if (!v->csum_type)
+ return;
+
+ BUG_ON(buf->offset);
+ BUG_ON(buf->size != le16_to_cpu(v->sectors));
+
+ for (i = 0; i < v->nr_blocks; i++)
+ for (j = 0; j < csums_per_device; j++)
+ stripe_csum_set(v, i, j,
+ ec_block_checksum(buf, i, j << v->csum_granularity_bits));
+}
+
+static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned csum_granularity = 1 << v->csum_granularity_bits;
+ unsigned i;
+
+ if (!v->csum_type)
+ return;
+
+ for (i = 0; i < v->nr_blocks; i++) {
+ unsigned offset = buf->offset;
+ unsigned end = buf->offset + buf->size;
+
+ if (!test_bit(i, buf->valid))
+ continue;
+
+ while (offset < end) {
+ unsigned j = offset >> v->csum_granularity_bits;
+ unsigned len = min(csum_granularity, end - offset);
+ struct bch_csum want = stripe_csum_get(v, i, j);
+ struct bch_csum got = ec_block_checksum(buf, i, offset);
+
+ if (bch2_crc_cmp(want, got)) {
+ struct printbuf buf2 = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key));
+
+ bch_err_ratelimited(c,
+ "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
+ (void *) _RET_IP_, i, j, v->csum_type,
+ want.lo, got.lo, buf2.buf);
+ printbuf_exit(&buf2);
+ clear_bit(i, buf->valid);
+ break;
+ }
+
+ offset += len;
+ }
+ }
+}
+
+/* Erasure coding: */
+
+static void ec_generate_ec(struct ec_stripe_buf *buf)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned nr_data = v->nr_blocks - v->nr_redundant;
+ unsigned bytes = le16_to_cpu(v->sectors) << 9;
+
+ raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
+}
+
+static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+
+ return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
+}
+
+static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
+ unsigned nr_data = v->nr_blocks - v->nr_redundant;
+ unsigned bytes = buf->size << 9;
+
+ if (ec_nr_failed(buf) > v->nr_redundant) {
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: unable to read enough blocks");
+ return -1;
+ }
+
+ for (i = 0; i < nr_data; i++)
+ if (!test_bit(i, buf->valid))
+ failed[nr_failed++] = i;
+
+ raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
+ return 0;
+}
+
+/* IO: */
+
+static void ec_block_endio(struct bio *bio)
+{
+ struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
+ struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
+ struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
+ struct bch_dev *ca = ec_bio->ca;
+ struct closure *cl = bio->bi_private;
+
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
+ bio_data_dir(bio) ? "write" : "read",
+ bch2_blk_status_to_str(bio->bi_status)))
+ clear_bit(ec_bio->idx, ec_bio->buf->valid);
+
+ if (ptr_stale(ca, ptr)) {
+ bch_err_ratelimited(ca->fs,
+ "error %s stripe: stale pointer after io",
+ bio_data_dir(bio) == READ ? "reading from" : "writing to");
+ clear_bit(ec_bio->idx, ec_bio->buf->valid);
+ }
+
+ bio_put(&ec_bio->bio);
+ percpu_ref_put(&ca->io_ref);
+ closure_put(cl);
+}
+
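+/*
+ * Read or write one stripe block. A block can be bigger than a single bio
+ * can map (BIO_MAX_VECS pages), so the buffer is submitted as a series of
+ * bios; completions are tracked through the caller's closure, and a failed
+ * or stale block is dropped from buf->valid.
+ */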
+static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
+ blk_opf_t opf, unsigned idx, struct closure *cl)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+ unsigned offset = 0, bytes = buf->size << 9;
+ struct bch_extent_ptr *ptr = &v->ptrs[idx];
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
+ ? BCH_DATA_user
+ : BCH_DATA_parity;
+ int rw = op_is_write(opf);
+
+ if (ptr_stale(ca, ptr)) {
+ bch_err_ratelimited(c,
+ "error %s stripe: stale pointer",
+ rw == READ ? "reading from" : "writing to");
+ clear_bit(idx, buf->valid);
+ return;
+ }
+
+ if (!bch2_dev_get_ioref(ca, rw)) {
+ clear_bit(idx, buf->valid);
+ return;
+ }
+
+ this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
+
+ while (offset < bytes) {
+ unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
+ DIV_ROUND_UP(bytes, PAGE_SIZE));
+ unsigned b = min_t(size_t, bytes - offset,
+ nr_iovecs << PAGE_SHIFT);
+ struct ec_bio *ec_bio;
+
+ ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
+ nr_iovecs,
+ opf,
+ GFP_KERNEL,
+ &c->ec_bioset),
+ struct ec_bio, bio);
+
+ ec_bio->ca = ca;
+ ec_bio->buf = buf;
+ ec_bio->idx = idx;
+
+ ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
+ ec_bio->bio.bi_end_io = ec_block_endio;
+ ec_bio->bio.bi_private = cl;
+
+ bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
+
+ closure_get(cl);
+ percpu_ref_get(&ca->io_ref);
+
+ submit_bio(&ec_bio->bio);
+
+ offset += b;
+ }
+
+ percpu_ref_put(&ca->io_ref);
+}
+
+static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
+ struct ec_stripe_buf *stripe)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
+ POS(0, idx), BTREE_ITER_SLOTS);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+ if (k.k->type != KEY_TYPE_stripe) {
+ ret = -ENOENT;
+ goto err;
+ }
+ bkey_reassemble(&stripe->key, k);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
+{
+ return bch2_trans_run(c, get_stripe_key_trans(trans, idx, stripe));
+}
+
+/* recovery read path: */
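+/*
+ * Reconstruct read: look up the stripe this extent points into, read all
+ * the blocks we can, verify checksums, rebuild missing data from parity,
+ * then copy the requested range back into the original read bio.
+ */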
+int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
+{
+ struct ec_stripe_buf *buf;
+ struct closure cl;
+ struct bch_stripe *v;
+ unsigned i, offset;
+ int ret = 0;
+
+ closure_init_stack(&cl);
+
+ BUG_ON(!rbio->pick.has_ec);
+
+ buf = kzalloc(sizeof(*buf), GFP_NOFS);
+ if (!buf)
+ return -BCH_ERR_ENOMEM_ec_read_extent;
+
+ ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
+ if (ret) {
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: error %i looking up stripe", ret);
+ kfree(buf);
+ return -EIO;
+ }
+
+ v = &bkey_i_to_stripe(&buf->key)->v;
+
+ if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: pointer doesn't match stripe");
+ ret = -EIO;
+ goto err;
+ }
+
+ offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
+ if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: read is bigger than stripe");
+ ret = -EIO;
+ goto err;
+ }
+
+ ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
+ if (ret)
+ goto err;
+
+ for (i = 0; i < v->nr_blocks; i++)
+ ec_block_io(c, buf, REQ_OP_READ, i, &cl);
+
+ closure_sync(&cl);
+
+ if (ec_nr_failed(buf) > v->nr_redundant) {
+ bch_err_ratelimited(c,
+ "error doing reconstruct read: unable to read enough blocks");
+ ret = -EIO;
+ goto err;
+ }
+
+ ec_validate_checksums(c, buf);
+
+ ret = ec_do_recov(c, buf);
+ if (ret)
+ goto err;
+
+ memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
+ buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
+err:
+ ec_stripe_buf_exit(buf);
+ kfree(buf);
+ return ret;
+}
+
+/* stripe bucket accounting: */
+
+static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
+{
+ ec_stripes_heap n, *h = &c->ec_stripes_heap;
+
+ if (idx >= h->size) {
+ if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
+ return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
+
+ mutex_lock(&c->ec_stripes_heap_lock);
+ if (n.size > h->size) {
+ memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
+ n.used = h->used;
+ swap(*h, n);
+ }
+ mutex_unlock(&c->ec_stripes_heap_lock);
+
+ free_heap(&n);
+ }
+
+ if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
+ return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
+
+ if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
+ !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
+ return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
+
+ return 0;
+}
+
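+/*
+ * Transactional wrapper around __ec_stripe_mem_alloc(). The
+ * allocate_dropping_locks_errcode() helper is presumably what lets the
+ * allocation be retried with GFP_KERNEL after dropping btree locks,
+ * avoiding deadlock against memory reclaim.
+ */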
+static int ec_stripe_mem_alloc(struct btree_trans *trans,
+ struct btree_iter *iter)
+{
+ return allocate_dropping_locks_errcode(trans,
+ __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
+}
+
+/*
+ * Hash table of open stripes:
+ * Stripes that are being created or modified are kept in a hash table, so that
+ * stripe deletion can skip them.
+ */
+
+static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
+{
+ unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
+ struct ec_stripe_new *s;
+
+ hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
+ if (s->idx == idx)
+ return true;
+ return false;
+}
+
+static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
+{
+ bool ret = false;
+
+ spin_lock(&c->ec_stripes_new_lock);
+ ret = __bch2_stripe_is_open(c, idx);
+ spin_unlock(&c->ec_stripes_new_lock);
+
+ return ret;
+}
+
+static bool bch2_try_open_stripe(struct bch_fs *c,
+ struct ec_stripe_new *s,
+ u64 idx)
+{
+ bool ret;
+
+ spin_lock(&c->ec_stripes_new_lock);
+ ret = !__bch2_stripe_is_open(c, idx);
+ if (ret) {
+ unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
+
+ s->idx = idx;
+ hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
+ }
+ spin_unlock(&c->ec_stripes_new_lock);
+
+ return ret;
+}
+
+static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
+{
+ BUG_ON(!s->idx);
+
+ spin_lock(&c->ec_stripes_new_lock);
+ hlist_del_init(&s->hash);
+ spin_unlock(&c->ec_stripes_new_lock);
+
+ s->idx = 0;
+}
+
+/* Heap of all existing stripes, ordered by blocks_nonempty */
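+/*
+ * The heap is ordered by blocks_nonempty, smallest first, and each stripe
+ * records its heap index so entries can be resorted in place as block
+ * counts change; an empty stripe at the root that nobody has open is a
+ * deletion candidate.
+ */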
+
+static u64 stripe_idx_to_delete(struct bch_fs *c)
+{
+ ec_stripes_heap *h = &c->ec_stripes_heap;
+
+ lockdep_assert_held(&c->ec_stripes_heap_lock);
+
+ if (h->used &&
+ h->data[0].blocks_nonempty == 0 &&
+ !bch2_stripe_is_open(c, h->data[0].idx))
+ return h->data[0].idx;
+
+ return 0;
+}
+
+static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
+ struct ec_stripe_heap_entry l,
+ struct ec_stripe_heap_entry r)
+{
+ return ((l.blocks_nonempty > r.blocks_nonempty) -
+ (l.blocks_nonempty < r.blocks_nonempty));
+}
+
+static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
+ size_t i)
+{
+ struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);
+
+ genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
+}
+
+static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
+{
+ ec_stripes_heap *h = &c->ec_stripes_heap;
+ struct stripe *m = genradix_ptr(&c->stripes, idx);
+
+ BUG_ON(m->heap_idx >= h->used);
+ BUG_ON(h->data[m->heap_idx].idx != idx);
+}
+
+void bch2_stripes_heap_del(struct bch_fs *c,
+ struct stripe *m, size_t idx)
+{
+ mutex_lock(&c->ec_stripes_heap_lock);
+ heap_verify_backpointer(c, idx);
+
+ heap_del(&c->ec_stripes_heap, m->heap_idx,
+ ec_stripes_heap_cmp,
+ ec_stripes_heap_set_backpointer);
+ mutex_unlock(&c->ec_stripes_heap_lock);
+}
+
+void bch2_stripes_heap_insert(struct bch_fs *c,
+ struct stripe *m, size_t idx)
+{
+ mutex_lock(&c->ec_stripes_heap_lock);
+ BUG_ON(heap_full(&c->ec_stripes_heap));
+
+ heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
+ .idx = idx,
+ .blocks_nonempty = m->blocks_nonempty,
+ }),
+ ec_stripes_heap_cmp,
+ ec_stripes_heap_set_backpointer);
+
+ heap_verify_backpointer(c, idx);
+ mutex_unlock(&c->ec_stripes_heap_lock);
+}
+
+void bch2_stripes_heap_update(struct bch_fs *c,
+ struct stripe *m, size_t idx)
+{
+ ec_stripes_heap *h = &c->ec_stripes_heap;
+ bool do_deletes;
+ size_t i;
+
+ mutex_lock(&c->ec_stripes_heap_lock);
+ heap_verify_backpointer(c, idx);
+
+ h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
+
+ i = m->heap_idx;
+ heap_sift_up(h, i, ec_stripes_heap_cmp,
+ ec_stripes_heap_set_backpointer);
+ heap_sift_down(h, i, ec_stripes_heap_cmp,
+ ec_stripes_heap_set_backpointer);
+
+ heap_verify_backpointer(c, idx);
+
+ do_deletes = stripe_idx_to_delete(c) != 0;
+ mutex_unlock(&c->ec_stripes_heap_lock);
+
+ if (do_deletes)
+ bch2_do_stripe_deletes(c);
+}
+
+/* stripe deletion */
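+/*
+ * Deletion runs from a work item that repeatedly takes the index of the
+ * empty stripe at the top of the heap, if any, and deletes its key from
+ * the stripes btree in a BTREE_INSERT_NOFAIL commit.
+ */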
+
+static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_stripe s;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
+ BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != KEY_TYPE_stripe) {
+ bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ s = bkey_s_c_to_stripe(k);
+ for (unsigned i = 0; i < s.v->nr_blocks; i++)
+ if (stripe_blockcount_get(s.v, i)) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
+ printbuf_exit(&buf);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static void ec_stripe_delete_work(struct work_struct *work)
+{
+ struct bch_fs *c =
+ container_of(work, struct bch_fs, ec_stripe_delete_work);
+ struct btree_trans *trans = bch2_trans_get(c);
+ int ret;
+ u64 idx;
+
+ while (1) {
+ mutex_lock(&c->ec_stripes_heap_lock);
+ idx = stripe_idx_to_delete(c);
+ mutex_unlock(&c->ec_stripes_heap_lock);
+
+ if (!idx)
+ break;
+
+ ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ ec_stripe_delete(trans, idx));
+ if (ret) {
+ bch_err_fn(c, ret);
+ break;
+ }
+ }
+
+ bch2_trans_put(trans);
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
+}
+
+void bch2_do_stripe_deletes(struct bch_fs *c)
+{
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
+ !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
+}
+
+/* stripe creation: */
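+/*
+ * Creation pipeline: a stripe head hands out an ec_stripe_new, buckets are
+ * allocated for it (or an existing stripe with empty blocks is reused),
+ * foreground writes fill the data blocks, and once everything is written
+ * we generate parity and checksums, write out p/q, insert the stripe key
+ * and point the extents at the new stripe.
+ */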
+
+static int ec_stripe_key_update(struct btree_trans *trans,
+ struct bkey_i_stripe *new,
+ bool create)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
+ new->k.p, BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
+ bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
+ create ? "creating" : "updating",
+ bch2_bkey_types[k.k->type]);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (k.k->type == KEY_TYPE_stripe) {
+ const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
+ unsigned i;
+
+ if (old->nr_blocks != new->v.nr_blocks) {
+ bch_err(c, "error updating stripe: nr_blocks does not match");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < new->v.nr_blocks; i++) {
+ unsigned v = stripe_blockcount_get(old, i);
+
+ BUG_ON(v &&
+ (old->ptrs[i].dev != new->v.ptrs[i].dev ||
+ old->ptrs[i].gen != new->v.ptrs[i].gen ||
+ old->ptrs[i].offset != new->v.ptrs[i].offset));
+
+ stripe_blockcount_set(&new->v, i, v);
+ }
+ }
+
+ ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int ec_stripe_update_extent(struct btree_trans *trans,
+ struct bpos bucket, u8 gen,
+ struct ec_stripe_buf *s,
+ struct bpos *bp_pos)
+{
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+ struct bch_fs *c = trans->c;
+ struct bch_backpointer bp;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ const struct bch_extent_ptr *ptr_c;
+ struct bch_extent_ptr *ptr, *ec_ptr = NULL;
+ struct bch_extent_stripe_ptr stripe_ptr;
+ struct bkey_i *n;
+ int ret, dev, block;
+
+ ret = bch2_get_next_backpointer(trans, bucket, gen,
+ bp_pos, &bp, BTREE_ITER_CACHED);
+ if (ret)
+ return ret;
+ if (bpos_eq(*bp_pos, SPOS_MAX))
+ return 0;
+
+ if (bp.level) {
+ struct printbuf buf = PRINTBUF;
+ struct btree_iter node_iter;
+ struct btree *b;
+
+ b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
+ bch2_trans_iter_exit(trans, &node_iter);
+
+ if (!b)
+ return 0;
+
+ prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
+ bch2_backpointer_to_text(&buf, &bp);
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return -EIO;
+ }
+
+ k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+ if (!k.k) {
+ /*
+ * extent no longer exists - we could flush the btree
+ * write buffer and retry to verify, but no need:
+ */
+ return 0;
+ }
+
+ if (extent_has_stripe_ptr(k, s->key.k.p.offset))
+ goto out;
+
+ ptr_c = bkey_matches_stripe(v, k, &block);
+ /*
+ * It doesn't generally make sense to erasure code cached ptrs:
+ * XXX: should we be incrementing a counter?
+ */
+ if (!ptr_c || ptr_c->cached)
+ goto out;
+
+ dev = v->ptrs[block].dev;
+
+ n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ goto out;
+
+ bkey_reassemble(n, k);
+
+ bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
+ ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
+ BUG_ON(!ec_ptr);
+
+ stripe_ptr = (struct bch_extent_stripe_ptr) {
+ .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
+ .block = block,
+ .redundancy = v->nr_redundant,
+ .idx = s->key.k.p.offset,
+ };
+
+ __extent_entry_insert(n,
+ (union bch_extent_entry *) ec_ptr,
+ (union bch_extent_entry *) &stripe_ptr);
+
+ ret = bch2_trans_update(trans, &iter, n, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
+ unsigned block)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+ struct bch_extent_ptr bucket = v->ptrs[block];
+ struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
+ struct bpos bp_pos = POS_MIN;
+ int ret = 0;
+
+ while (1) {
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL,
+ ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
+ s, &bp_pos));
+ if (ret)
+ break;
+ if (bkey_eq(bp_pos, POS_MAX))
+ break;
+
+ bp_pos = bpos_nosnap_successor(bp_pos);
+ }
+
+ return ret;
+}
+
+static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+ unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
+ int ret = 0;
+
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < nr_data; i++) {
+ ret = ec_stripe_update_bucket(trans, s, i);
+ if (ret)
+ break;
+ }
+err:
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
+ struct ec_stripe_new *s,
+ unsigned block,
+ struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ unsigned offset = ca->mi.bucket_size - ob->sectors_free;
+ int ret;
+
+ if (!bch2_dev_get_ioref(ca, WRITE)) {
+ s->err = -BCH_ERR_erofs_no_writes;
+ return;
+ }
+
+ memset(s->new_stripe.data[block] + (offset << 9),
+ 0,
+ ob->sectors_free << 9);
+
+ ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
+ ob->bucket * ca->mi.bucket_size + offset,
+ ob->sectors_free,
+ GFP_KERNEL, 0);
+
+ percpu_ref_put(&ca->io_ref);
+
+ if (ret)
+ s->err = ret;
+}
+
+void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
+{
+ if (s->idx)
+ bch2_stripe_close(c, s);
+ kfree(s);
+}
+
+/*
+ * data buckets of new stripe all written: create the stripe
+ */
+static void ec_stripe_create(struct ec_stripe_new *s)
+{
+ struct bch_fs *c = s->c;
+ struct open_bucket *ob;
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
+ unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
+ int ret;
+
+ BUG_ON(s->h->s == s);
+
+ closure_sync(&s->iodone);
+
+ if (!s->err) {
+ for (i = 0; i < nr_data; i++)
+ if (s->blocks[i]) {
+ ob = c->open_buckets + s->blocks[i];
+
+ if (ob->sectors_free)
+ zero_out_rest_of_ec_bucket(c, s, i, ob);
+ }
+ }
+
+ if (s->err) {
+ if (!bch2_err_matches(s->err, EROFS))
+ bch_err(c, "error creating stripe: error writing data buckets");
+ goto err;
+ }
+
+ if (s->have_existing_stripe) {
+ ec_validate_checksums(c, &s->existing_stripe);
+
+ if (ec_do_recov(c, &s->existing_stripe)) {
+ bch_err(c, "error creating stripe: error reading existing stripe");
+ goto err;
+ }
+
+ for (i = 0; i < nr_data; i++)
+ if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
+ swap(s->new_stripe.data[i],
+ s->existing_stripe.data[i]);
+
+ ec_stripe_buf_exit(&s->existing_stripe);
+ }
+
+ BUG_ON(!s->allocated);
+ BUG_ON(!s->idx);
+
+ ec_generate_ec(&s->new_stripe);
+
+ ec_generate_checksums(&s->new_stripe);
+
+ /* write p/q: */
+ for (i = nr_data; i < v->nr_blocks; i++)
+ ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
+ closure_sync(&s->iodone);
+
+ if (ec_nr_failed(&s->new_stripe)) {
+ bch_err(c, "error creating stripe: error writing redundancy buckets");
+ goto err;
+ }
+
+ ret = bch2_trans_do(c, &s->res, NULL,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL,
+ ec_stripe_key_update(trans,
+ bkey_i_to_stripe(&s->new_stripe.key),
+ !s->have_existing_stripe));
+ if (ret) {
+ bch_err(c, "error creating stripe: error creating stripe key");
+ goto err;
+ }
+
+ ret = ec_stripe_update_extents(c, &s->new_stripe);
+ if (ret) {
+ bch_err_msg(c, ret, "creating stripe: error updating pointers");
+ goto err;
+ }
+err:
+ bch2_disk_reservation_put(c, &s->res);
+
+ for (i = 0; i < v->nr_blocks; i++)
+ if (s->blocks[i]) {
+ ob = c->open_buckets + s->blocks[i];
+
+ if (i < nr_data) {
+ ob->ec = NULL;
+ __bch2_open_bucket_put(c, ob);
+ } else {
+ bch2_open_bucket_put(c, ob);
+ }
+ }
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_del(&s->list);
+ mutex_unlock(&c->ec_stripe_new_lock);
+ wake_up(&c->ec_stripe_new_wait);
+
+ ec_stripe_buf_exit(&s->existing_stripe);
+ ec_stripe_buf_exit(&s->new_stripe);
+ closure_debug_destroy(&s->iodone);
+
+ ec_stripe_new_put(c, s, STRIPE_REF_stripe);
+}
+
+static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
+{
+ struct ec_stripe_new *s;
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_for_each_entry(s, &c->ec_stripe_new_list, list)
+ if (!atomic_read(&s->ref[STRIPE_REF_io]))
+ goto out;
+ s = NULL;
+out:
+ mutex_unlock(&c->ec_stripe_new_lock);
+
+ return s;
+}
+
+static void ec_stripe_create_work(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work,
+ struct bch_fs, ec_stripe_create_work);
+ struct ec_stripe_new *s;
+
+ while ((s = get_pending_stripe(c)))
+ ec_stripe_create(s);
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
+}
+
+void bch2_ec_do_stripe_creates(struct bch_fs *c)
+{
+ bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
+
+ if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
+}
+
+static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
+{
+ struct ec_stripe_new *s = h->s;
+
+ BUG_ON(!s->allocated && !s->err);
+
+ h->s = NULL;
+ s->pending = true;
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_add(&s->list, &c->ec_stripe_new_list);
+ mutex_unlock(&c->ec_stripe_new_lock);
+
+ ec_stripe_new_put(c, s, STRIPE_REF_io);
+}
+
+void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
+{
+ struct ec_stripe_new *s = ob->ec;
+
+ s->err = -EIO;
+}
+
+void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
+{
+ struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
+ struct bch_dev *ca;
+ unsigned offset;
+
+ if (!ob)
+ return NULL;
+
+ BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
+
+ ca = bch_dev_bkey_exists(c, ob->dev);
+ offset = ca->mi.bucket_size - ob->sectors_free;
+
+ return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
+}
+
+static int unsigned_cmp(const void *_l, const void *_r)
+{
+ unsigned l = *((const unsigned *) _l);
+ unsigned r = *((const unsigned *) _r);
+
+ return cmp_int(l, r);
+}
+
+/* pick most common bucket size: */
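+/*
+ * Every block of a stripe must be the same size, so stripes are built only
+ * from devices whose bucket size matches the most common one; the scan over
+ * the sorted size array below finds that mode.
+ */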
+static unsigned pick_blocksize(struct bch_fs *c,
+ struct bch_devs_mask *devs)
+{
+ struct bch_dev *ca;
+ unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
+ struct {
+ unsigned nr, size;
+ } cur = { 0, 0 }, best = { 0, 0 };
+
+ for_each_member_device_rcu(ca, c, i, devs)
+ sizes[nr++] = ca->mi.bucket_size;
+
+ sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
+
+ for (i = 0; i < nr; i++) {
+ if (sizes[i] != cur.size) {
+ if (cur.nr > best.nr)
+ best = cur;
+
+ cur.nr = 0;
+ cur.size = sizes[i];
+ }
+
+ cur.nr++;
+ }
+
+ if (cur.nr > best.nr)
+ best = cur;
+
+ return best.size;
+}
+
+static bool may_create_new_stripe(struct bch_fs *c)
+{
+ return false;
+}
+
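+/*
+ * Initialize a new stripe key. If the stripe value (pointers, per-device
+ * checksums, block counts) would overflow the maximum bkey value size, the
+ * checksum granularity is widened until it fits.
+ */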
+static void ec_stripe_key_init(struct bch_fs *c,
+ struct bkey_i *k,
+ unsigned nr_data,
+ unsigned nr_parity,
+ unsigned stripe_size)
+{
+ struct bkey_i_stripe *s = bkey_stripe_init(k);
+ unsigned u64s;
+
+ s->v.sectors = cpu_to_le16(stripe_size);
+ s->v.algorithm = 0;
+ s->v.nr_blocks = nr_data + nr_parity;
+ s->v.nr_redundant = nr_parity;
+ s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9);
+ s->v.csum_type = BCH_CSUM_crc32c;
+ s->v.pad = 0;
+
+ while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
+ BUG_ON(1 << s->v.csum_granularity_bits >=
+ le16_to_cpu(s->v.sectors) ||
+ s->v.csum_granularity_bits == U8_MAX);
+ s->v.csum_granularity_bits++;
+ }
+
+ set_bkey_val_u64s(&s->k, u64s);
+}
+
+static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
+{
+ struct ec_stripe_new *s;
+
+ lockdep_assert_held(&h->lock);
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
+
+ mutex_init(&s->lock);
+ closure_init(&s->iodone, NULL);
+ atomic_set(&s->ref[STRIPE_REF_stripe], 1);
+ atomic_set(&s->ref[STRIPE_REF_io], 1);
+ s->c = c;
+ s->h = h;
+ s->nr_data = min_t(unsigned, h->nr_active_devs,
+ BCH_BKEY_PTRS_MAX) - h->redundancy;
+ s->nr_parity = h->redundancy;
+
+ ec_stripe_key_init(c, &s->new_stripe.key,
+ s->nr_data, s->nr_parity, h->blocksize);
+
+ h->s = s;
+ return 0;
+}
+
+static struct ec_stripe_head *
+ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
+ unsigned algo, unsigned redundancy,
+ enum bch_watermark watermark)
+{
+ struct ec_stripe_head *h;
+ struct bch_dev *ca;
+ unsigned i;
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return NULL;
+
+ mutex_init(&h->lock);
+ BUG_ON(!mutex_trylock(&h->lock));
+
+ h->target = target;
+ h->algo = algo;
+ h->redundancy = redundancy;
+ h->watermark = watermark;
+
+ rcu_read_lock();
+ h->devs = target_rw_devs(c, BCH_DATA_user, target);
+
+ for_each_member_device_rcu(ca, c, i, &h->devs)
+ if (!ca->mi.durability)
+ __clear_bit(i, h->devs.d);
+
+ h->blocksize = pick_blocksize(c, &h->devs);
+
+ for_each_member_device_rcu(ca, c, i, &h->devs)
+ if (ca->mi.bucket_size == h->blocksize)
+ h->nr_active_devs++;
+
+ rcu_read_unlock();
+ list_add(&h->list, &c->ec_stripe_head_list);
+ return h;
+}
+
+void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
+{
+ if (h->s &&
+ h->s->allocated &&
+ bitmap_weight(h->s->blocks_allocated,
+ h->s->nr_data) == h->s->nr_data)
+ ec_stripe_set_pending(c, h);
+
+ mutex_unlock(&h->lock);
+}
+
+static struct ec_stripe_head *
+__bch2_ec_stripe_head_get(struct btree_trans *trans,
+ unsigned target,
+ unsigned algo,
+ unsigned redundancy,
+ enum bch_watermark watermark)
+{
+ struct bch_fs *c = trans->c;
+ struct ec_stripe_head *h;
+ int ret;
+
+ if (!redundancy)
+ return NULL;
+
+ ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (test_bit(BCH_FS_GOING_RO, &c->flags)) {
+ h = ERR_PTR(-BCH_ERR_erofs_no_writes);
+ goto found;
+ }
+
+ list_for_each_entry(h, &c->ec_stripe_head_list, list)
+ if (h->target == target &&
+ h->algo == algo &&
+ h->redundancy == redundancy &&
+ h->watermark == watermark) {
+ ret = bch2_trans_mutex_lock(trans, &h->lock);
+ if (ret)
+ h = ERR_PTR(ret);
+ goto found;
+ }
+
+ h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
+found:
+ mutex_unlock(&c->ec_stripe_head_lock);
+ return h;
+}
+
+static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
+ enum bch_watermark watermark, struct closure *cl)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_devs_mask devs = h->devs;
+ struct open_bucket *ob;
+ struct open_buckets buckets;
+ struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
+ unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
+ bool have_cache = true;
+ int ret = 0;
+
+ BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
+ BUG_ON(v->nr_redundant != h->s->nr_parity);
+
+ for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
+ __clear_bit(v->ptrs[i].dev, devs.d);
+ if (i < h->s->nr_data)
+ nr_have_data++;
+ else
+ nr_have_parity++;
+ }
+
+ BUG_ON(nr_have_data > h->s->nr_data);
+ BUG_ON(nr_have_parity > h->s->nr_parity);
+
+ buckets.nr = 0;
+ if (nr_have_parity < h->s->nr_parity) {
+ ret = bch2_bucket_alloc_set_trans(trans, &buckets,
+ &h->parity_stripe,
+ &devs,
+ h->s->nr_parity,
+ &nr_have_parity,
+ &have_cache, 0,
+ BCH_DATA_parity,
+ watermark,
+ cl);
+
+ open_bucket_for_each(c, &buckets, ob, i) {
+ j = find_next_zero_bit(h->s->blocks_gotten,
+ h->s->nr_data + h->s->nr_parity,
+ h->s->nr_data);
+ BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
+
+ h->s->blocks[j] = buckets.v[i];
+ v->ptrs[j] = bch2_ob_ptr(c, ob);
+ __set_bit(j, h->s->blocks_gotten);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ buckets.nr = 0;
+ if (nr_have_data < h->s->nr_data) {
+ ret = bch2_bucket_alloc_set_trans(trans, &buckets,
+ &h->block_stripe,
+ &devs,
+ h->s->nr_data,
+ &nr_have_data,
+ &have_cache, 0,
+ BCH_DATA_user,
+ watermark,
+ cl);
+
+ open_bucket_for_each(c, &buckets, ob, i) {
+ j = find_next_zero_bit(h->s->blocks_gotten,
+ h->s->nr_data, 0);
+ BUG_ON(j >= h->s->nr_data);
+
+ h->s->blocks[j] = buckets.v[i];
+ v->ptrs[j] = bch2_ob_ptr(c, ob);
+ __set_bit(j, h->s->blocks_gotten);
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* XXX: doesn't obey target: */
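+/*
+ * Look for an existing stripe with empty blocks we can refill: it must
+ * match our algorithm, redundancy and block size, and we must be able to
+ * open it, i.e. nobody else may currently be creating or modifying it.
+ */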
+static s64 get_existing_stripe(struct bch_fs *c,
+ struct ec_stripe_head *head)
+{
+ ec_stripes_heap *h = &c->ec_stripes_heap;
+ struct stripe *m;
+ size_t heap_idx;
+ u64 stripe_idx;
+ s64 ret = -1;
+
+ if (may_create_new_stripe(c))
+ return -1;
+
+ mutex_lock(&c->ec_stripes_heap_lock);
+ for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
+ /* No blocks worth reusing, stripe will just be deleted: */
+ if (!h->data[heap_idx].blocks_nonempty)
+ continue;
+
+ stripe_idx = h->data[heap_idx].idx;
+
+ m = genradix_ptr(&c->stripes, stripe_idx);
+
+ if (m->algorithm == head->algo &&
+ m->nr_redundant == head->redundancy &&
+ m->sectors == head->blocksize &&
+ m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
+ bch2_try_open_stripe(c, head->s, stripe_idx)) {
+ ret = stripe_idx;
+ break;
+ }
+ }
+ mutex_unlock(&c->ec_stripes_heap_lock);
+ return ret;
+}
+
+static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
+ struct bch_stripe *existing_v;
+ unsigned i;
+ s64 idx;
+ int ret;
+
+ /*
+ * If we can't allocate a new stripe, and there's no stripes with empty
+ * blocks for us to reuse, that means we have to wait on copygc:
+ */
+ idx = get_existing_stripe(c, h);
+ if (idx < 0)
+ return -BCH_ERR_stripe_alloc_blocked;
+
+ ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
+ if (ret) {
+ bch2_stripe_close(c, h->s);
+ if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch2_fs_fatal_error(c, "error reading stripe key: %s", bch2_err_str(ret));
+ return ret;
+ }
+
+ existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
+
+ BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
+ h->s->nr_data = existing_v->nr_blocks -
+ existing_v->nr_redundant;
+
+ ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
+ if (ret) {
+ bch2_stripe_close(c, h->s);
+ return ret;
+ }
+
+ BUG_ON(h->s->existing_stripe.size != h->blocksize);
+ BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
+
+ /*
+ * Free buckets we initially allocated - they might conflict with
+ * blocks from the stripe we're reusing:
+ */
+ for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
+ bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
+ h->s->blocks[i] = 0;
+ }
+ memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
+ memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
+
+ for (i = 0; i < existing_v->nr_blocks; i++) {
+ if (stripe_blockcount_get(existing_v, i)) {
+ __set_bit(i, h->s->blocks_gotten);
+ __set_bit(i, h->s->blocks_allocated);
+ }
+
+ ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
+ }
+
+ bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
+ h->s->have_existing_stripe = true;
+
+ return 0;
+}
+
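+/*
+ * Reserve a slot in the stripes btree for the new stripe: take a disk
+ * reservation if we don't have one yet, then scan forward from
+ * ec_stripe_hint for a deleted slot we can open, wrapping around to the
+ * start if the scan runs past the maximum stripe index.
+ */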
+static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bpos min_pos = POS(0, 1);
+ struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
+ int ret;
+
+ if (!h->s->res.sectors) {
+ ret = bch2_disk_reservation_get(c, &h->s->res,
+ h->blocksize,
+ h->s->nr_parity,
+ BCH_DISK_RESERVATION_NOFAIL);
+ if (ret)
+ return ret;
+ }
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+ if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
+ if (start_pos.offset) {
+ start_pos = min_pos;
+ bch2_btree_iter_set_pos(&iter, start_pos);
+ continue;
+ }
+
+ ret = -BCH_ERR_ENOSPC_stripe_create;
+ break;
+ }
+
+ if (bkey_deleted(k.k) &&
+ bch2_try_open_stripe(c, h->s, k.k->p.offset))
+ break;
+ }
+
+ c->ec_stripe_hint = iter.pos.offset;
+
+ if (ret)
+ goto err;
+
+ ret = ec_stripe_mem_alloc(trans, &iter);
+ if (ret) {
+ bch2_stripe_close(c, h->s);
+ goto err;
+ }
+
+ h->s->new_stripe.key.k.p = iter.pos;
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+err:
+ bch2_disk_reservation_put(c, &h->s->res);
+ goto out;
+}
+
+struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
+ unsigned target,
+ unsigned algo,
+ unsigned redundancy,
+ enum bch_watermark watermark,
+ struct closure *cl)
+{
+ struct bch_fs *c = trans->c;
+ struct ec_stripe_head *h;
+ bool waiting = false;
+ int ret;
+
+ h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
+ if (!h)
+ bch_err(c, "no stripe head");
+ if (IS_ERR_OR_NULL(h))
+ return h;
+
+ if (!h->s) {
+ ret = ec_new_stripe_alloc(c, h);
+ if (ret) {
+ bch_err(c, "failed to allocate new stripe");
+ goto err;
+ }
+ }
+
+ if (h->s->allocated)
+ goto allocated;
+
+ if (h->s->have_existing_stripe)
+ goto alloc_existing;
+
+ /* First, try to allocate a full stripe: */
+ ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
+ __bch2_ec_stripe_head_reserve(trans, h);
+ if (!ret)
+ goto allocate_buf;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ bch2_err_matches(ret, ENOMEM))
+ goto err;
+
+ /*
+ * Not enough buckets available for a full stripe: we must reuse an
+ * existing stripe:
+ */
+ while (1) {
+ ret = __bch2_ec_stripe_head_reuse(trans, h);
+ if (!ret)
+ break;
+ if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
+ goto err;
+
+ if (watermark == BCH_WATERMARK_copygc) {
+ ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
+ __bch2_ec_stripe_head_reserve(trans, h);
+ if (ret)
+ goto err;
+ goto allocate_buf;
+ }
+
+ /* XXX freelist_wait? */
+ closure_wait(&c->freelist_wait, cl);
+ waiting = true;
+ }
+
+ if (waiting)
+ closure_wake_up(&c->freelist_wait);
+alloc_existing:
+ /*
+ * Retry allocating buckets, with the watermark for this
+ * particular write:
+ */
+ ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
+ if (ret)
+ goto err;
+
+allocate_buf:
+ ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
+ if (ret)
+ goto err;
+
+ h->s->allocated = true;
+allocated:
+ BUG_ON(!h->s->idx);
+ BUG_ON(!h->s->new_stripe.data[0]);
+ BUG_ON(trans->restarted);
+ return h;
+err:
+ bch2_ec_stripe_head_put(c, h);
+ return ERR_PTR(ret);
+}
+
+static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct ec_stripe_head *h;
+ struct open_bucket *ob;
+ unsigned i;
+
+ mutex_lock(&c->ec_stripe_head_lock);
+ list_for_each_entry(h, &c->ec_stripe_head_list, list) {
+ mutex_lock(&h->lock);
+ if (!h->s)
+ goto unlock;
+
+ if (!ca)
+ goto found;
+
+ for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
+ if (!h->s->blocks[i])
+ continue;
+
+ ob = c->open_buckets + h->s->blocks[i];
+ if (ob->dev == ca->dev_idx)
+ goto found;
+ }
+ goto unlock;
+found:
+ h->s->err = -BCH_ERR_erofs_no_writes;
+ ec_stripe_set_pending(c, h);
+unlock:
+ mutex_unlock(&h->lock);
+ }
+ mutex_unlock(&c->ec_stripe_head_lock);
+}
+
+void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
+{
+ __bch2_ec_stop(c, ca);
+}
+
+void bch2_fs_ec_stop(struct bch_fs *c)
+{
+ __bch2_ec_stop(c, NULL);
+}
+
+static bool bch2_fs_ec_flush_done(struct bch_fs *c)
+{
+ bool ret;
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ ret = list_empty(&c->ec_stripe_new_list);
+ mutex_unlock(&c->ec_stripe_new_lock);
+
+ return ret;
+}
+
+void bch2_fs_ec_flush(struct bch_fs *c)
+{
+ wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
+}
+
+int bch2_stripes_read(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ const struct bch_stripe *s;
+ struct stripe *m;
+ unsigned i;
+ int ret;
+
+ for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
+ BTREE_ITER_PREFETCH, k, ret) {
+ if (k.k->type != KEY_TYPE_stripe)
+ continue;
+
+ ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
+ if (ret)
+ break;
+
+ s = bkey_s_c_to_stripe(k).v;
+
+ m = genradix_ptr(&c->stripes, k.k->p.offset);
+ m->sectors = le16_to_cpu(s->sectors);
+ m->algorithm = s->algorithm;
+ m->nr_blocks = s->nr_blocks;
+ m->nr_redundant = s->nr_redundant;
+ m->blocks_nonempty = 0;
+
+ for (i = 0; i < s->nr_blocks; i++)
+ m->blocks_nonempty += !!stripe_blockcount_get(s, i);
+
+ bch2_stripes_heap_insert(c, m, k.k->p.offset);
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
+
+ return ret;
+}
+
+void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ ec_stripes_heap *h = &c->ec_stripes_heap;
+ struct stripe *m;
+ size_t i;
+
+ mutex_lock(&c->ec_stripes_heap_lock);
+ for (i = 0; i < min_t(size_t, h->used, 50); i++) {
+ m = genradix_ptr(&c->stripes, h->data[i].idx);
+
+ prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
+ h->data[i].blocks_nonempty,
+ m->nr_blocks - m->nr_redundant,
+ m->nr_redundant);
+ if (bch2_stripe_is_open(c, h->data[i].idx))
+ prt_str(out, " open");
+ prt_newline(out);
+ }
+ mutex_unlock(&c->ec_stripes_heap_lock);
+}
+
+void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct ec_stripe_head *h;
+ struct ec_stripe_new *s;
+
+ mutex_lock(&c->ec_stripe_head_lock);
+ list_for_each_entry(h, &c->ec_stripe_head_list, list) {
+ prt_printf(out, "target %u algo %u redundancy %u %s:\n",
+ h->target, h->algo, h->redundancy,
+ bch2_watermarks[h->watermark]);
+
+ if (h->s)
+ prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
+ h->s->idx, h->s->nr_data, h->s->nr_parity,
+ bitmap_weight(h->s->blocks_allocated,
+ h->s->nr_data));
+ }
+ mutex_unlock(&c->ec_stripe_head_lock);
+
+ prt_printf(out, "in flight:\n");
+
+ mutex_lock(&c->ec_stripe_new_lock);
+ list_for_each_entry(s, &c->ec_stripe_new_list, list) {
+ prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
+ s->idx, s->nr_data, s->nr_parity,
+ atomic_read(&s->ref[STRIPE_REF_io]),
+ atomic_read(&s->ref[STRIPE_REF_stripe]),
+ bch2_watermarks[s->h->watermark]);
+ }
+ mutex_unlock(&c->ec_stripe_new_lock);
+}
+
+void bch2_fs_ec_exit(struct bch_fs *c)
+{
+ struct ec_stripe_head *h;
+ unsigned i;
+
+ while (1) {
+ mutex_lock(&c->ec_stripe_head_lock);
+ h = list_first_entry_or_null(&c->ec_stripe_head_list,
+ struct ec_stripe_head, list);
+ if (h)
+ list_del(&h->list);
+ mutex_unlock(&c->ec_stripe_head_lock);
+ if (!h)
+ break;
+
+ if (h->s) {
+ for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
+ BUG_ON(h->s->blocks[i]);
+
+ kfree(h->s);
+ }
+ kfree(h);
+ }
+
+ BUG_ON(!list_empty(&c->ec_stripe_new_list));
+
+ free_heap(&c->ec_stripes_heap);
+ genradix_free(&c->stripes);
+ bioset_exit(&c->ec_bioset);
+}
+
+void bch2_fs_ec_init_early(struct bch_fs *c)
+{
+ spin_lock_init(&c->ec_stripes_new_lock);
+ mutex_init(&c->ec_stripes_heap_lock);
+
+ INIT_LIST_HEAD(&c->ec_stripe_head_list);
+ mutex_init(&c->ec_stripe_head_lock);
+
+ INIT_LIST_HEAD(&c->ec_stripe_new_list);
+ mutex_init(&c->ec_stripe_new_lock);
+ init_waitqueue_head(&c->ec_stripe_new_wait);
+
+ INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
+ INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
+}
+
+int bch2_fs_ec_init(struct bch_fs *c)
+{
+ return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
+ BIOSET_NEED_BVECS);
+}
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
new file mode 100644
index 000000000000..966d165a3b66
--- /dev/null
+++ b/fs/bcachefs/ec.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EC_H
+#define _BCACHEFS_EC_H
+
+#include "ec_types.h"
+#include "buckets_types.h"
+#include "extents_types.h"
+
+enum bkey_invalid_flags;
+
+int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+
+#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
+ .key_invalid = bch2_stripe_invalid, \
+ .val_to_text = bch2_stripe_to_text, \
+ .swab = bch2_ptr_swab, \
+ .trans_trigger = bch2_trans_mark_stripe, \
+ .atomic_trigger = bch2_mark_stripe, \
+ .min_val_size = 8, \
+})
+
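+/*
+ * A stripe value is laid out as the fixed bch_stripe header, nr_blocks
+ * extent pointers, a checksum array per device, and finally a u16 sector
+ * count per block; the helpers below compute offsets into that layout.
+ */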
+static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
+{
+ return DIV_ROUND_UP(le16_to_cpu(s->sectors),
+ 1 << s->csum_granularity_bits);
+}
+
+static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
+ unsigned dev, unsigned csum_idx)
+{
+ unsigned csum_bytes = bch_crc_bytes[s->csum_type];
+
+ return sizeof(struct bch_stripe) +
+ sizeof(struct bch_extent_ptr) * s->nr_blocks +
+ (dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
+}
+
+static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
+ unsigned idx)
+{
+ return stripe_csum_offset(s, s->nr_blocks, 0) +
+ sizeof(u16) * idx;
+}
+
+static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
+ unsigned idx)
+{
+ return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
+}
+
+static inline void stripe_blockcount_set(struct bch_stripe *s,
+ unsigned idx, unsigned v)
+{
+ __le16 *p = (void *) s + stripe_blockcount_offset(s, idx);
+
+ *p = cpu_to_le16(v);
+}
+
+static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
+{
+ return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
+ sizeof(u64));
+}
+
+static inline void *stripe_csum(struct bch_stripe *s,
+ unsigned block, unsigned csum_idx)
+{
+ EBUG_ON(block >= s->nr_blocks);
+ EBUG_ON(csum_idx >= stripe_csums_per_device(s));
+
+ return (void *) s + stripe_csum_offset(s, block, csum_idx);
+}
+
+static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
+ unsigned block, unsigned csum_idx)
+{
+ struct bch_csum csum = { 0 };
+
+ memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
+ return csum;
+}
+
+static inline void stripe_csum_set(struct bch_stripe *s,
+ unsigned block, unsigned csum_idx,
+ struct bch_csum csum)
+{
+ memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
+}
+
+static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
+ const struct bch_extent_ptr *data_ptr,
+ unsigned sectors)
+{
+ return data_ptr->dev == stripe_ptr->dev &&
+ data_ptr->gen == stripe_ptr->gen &&
+ data_ptr->offset >= stripe_ptr->offset &&
+ data_ptr->offset < stripe_ptr->offset + sectors;
+}
+
+static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
+ struct extent_ptr_decoded p)
+{
+ unsigned nr_data = s->nr_blocks - s->nr_redundant;
+
+ BUG_ON(!p.has_ec);
+
+ if (p.ec.block >= nr_data)
+ return false;
+
+ return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
+ le16_to_cpu(s->sectors));
+}
+
+static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
+ struct extent_ptr_decoded p)
+{
+ unsigned nr_data = m->nr_blocks - m->nr_redundant;
+
+ BUG_ON(!p.has_ec);
+
+ if (p.ec.block >= nr_data)
+ return false;
+
+ return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
+ m->sectors);
+}
+
+struct bch_read_bio;
+
+struct ec_stripe_buf {
+ /* might not be buffering the entire stripe: */
+ unsigned offset;
+ unsigned size;
+ unsigned long valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
+
+ void *data[BCH_BKEY_PTRS_MAX];
+
+ __BKEY_PADDED(key, 255);
+};
+
+struct ec_stripe_head;
+
+enum ec_stripe_ref {
+ STRIPE_REF_io,
+ STRIPE_REF_stripe,
+ STRIPE_REF_NR
+};
+
+struct ec_stripe_new {
+ struct bch_fs *c;
+ struct ec_stripe_head *h;
+ struct mutex lock;
+ struct list_head list;
+
+ struct hlist_node hash;
+ u64 idx;
+
+ struct closure iodone;
+
+ atomic_t ref[STRIPE_REF_NR];
+
+ int err;
+
+ u8 nr_data;
+ u8 nr_parity;
+ bool allocated;
+ bool pending;
+ bool have_existing_stripe;
+
+ unsigned long blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
+ unsigned long blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
+ open_bucket_idx_t blocks[BCH_BKEY_PTRS_MAX];
+ struct disk_reservation res;
+
+ struct ec_stripe_buf new_stripe;
+ struct ec_stripe_buf existing_stripe;
+};
+
+struct ec_stripe_head {
+ struct list_head list;
+ struct mutex lock;
+
+ unsigned target;
+ unsigned algo;
+ unsigned redundancy;
+ enum bch_watermark watermark;
+
+ struct bch_devs_mask devs;
+ unsigned nr_active_devs;
+
+ unsigned blocksize;
+
+ struct dev_stripe_state block_stripe;
+ struct dev_stripe_state parity_stripe;
+
+ struct ec_stripe_new *s;
+};
+
+int bch2_ec_read_extent(struct bch_fs *, struct bch_read_bio *);
+
+void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
+
+void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);
+
+int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
+
+void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
+struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
+ unsigned, unsigned, unsigned,
+ enum bch_watermark, struct closure *);
+
+void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
+void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
+void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);
+
+void bch2_do_stripe_deletes(struct bch_fs *);
+void bch2_ec_do_stripe_creates(struct bch_fs *);
+void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);
+
+static inline void ec_stripe_new_get(struct ec_stripe_new *s,
+ enum ec_stripe_ref ref)
+{
+ atomic_inc(&s->ref[ref]);
+}
+
+static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
+ enum ec_stripe_ref ref)
+{
+ BUG_ON(atomic_read(&s->ref[ref]) <= 0);
+
+ if (atomic_dec_and_test(&s->ref[ref]))
+ switch (ref) {
+ case STRIPE_REF_stripe:
+ bch2_ec_stripe_new_free(c, s);
+ break;
+ case STRIPE_REF_io:
+ bch2_ec_do_stripe_creates(c);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
+void bch2_fs_ec_stop(struct bch_fs *);
+void bch2_fs_ec_flush(struct bch_fs *);
+
+int bch2_stripes_read(struct bch_fs *);
+
+void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
+void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);
+
+void bch2_fs_ec_exit(struct bch_fs *);
+void bch2_fs_ec_init_early(struct bch_fs *);
+int bch2_fs_ec_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_EC_H */
diff --git a/fs/bcachefs/ec_types.h b/fs/bcachefs/ec_types.h
new file mode 100644
index 000000000000..e2b02a82de32
--- /dev/null
+++ b/fs/bcachefs/ec_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EC_TYPES_H
+#define _BCACHEFS_EC_TYPES_H
+
+#include "bcachefs_format.h"
+
+struct bch_replicas_padded {
+ struct bch_replicas_entry e;
+ u8 pad[BCH_BKEY_PTRS_MAX];
+};
+
+struct stripe {
+ size_t heap_idx;
+ u16 sectors;
+ u8 algorithm;
+ u8 nr_blocks;
+ u8 nr_redundant;
+ u8 blocks_nonempty;
+};
+
+struct gc_stripe {
+ u16 sectors;
+
+ u8 nr_blocks;
+ u8 nr_redundant;
+
+ unsigned alive:1; /* does a corresponding key exist in stripes btree? */
+ u16 block_sectors[BCH_BKEY_PTRS_MAX];
+ struct bch_extent_ptr ptrs[BCH_BKEY_PTRS_MAX];
+
+ struct bch_replicas_padded r;
+};
+
+struct ec_stripe_heap_entry {
+ size_t idx;
+ unsigned blocks_nonempty;
+};
+
+typedef HEAP(struct ec_stripe_heap_entry) ec_stripes_heap;
+
+#endif /* _BCACHEFS_EC_TYPES_H */
diff --git a/fs/bcachefs/errcode.c b/fs/bcachefs/errcode.c
new file mode 100644
index 000000000000..d260ff9bbfeb
--- /dev/null
+++ b/fs/bcachefs/errcode.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "errcode.h"
+
+#include <linux/errname.h>
+
+static const char * const bch2_errcode_strs[] = {
+#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = #err,
+ BCH_ERRCODES()
+#undef x
+ NULL
+};
+
+static unsigned bch2_errcode_parents[] = {
+#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = class,
+ BCH_ERRCODES()
+#undef x
+};
+
+const char *bch2_err_str(int err)
+{
+ const char *errstr;
+
+ err = abs(err);
+
+ BUG_ON(err >= BCH_ERR_MAX);
+
+ if (err >= BCH_ERR_START)
+ errstr = bch2_errcode_strs[err - BCH_ERR_START];
+ else if (err)
+ errstr = errname(err);
+ else
+ errstr = "(No error)";
+ return errstr ?: "(Invalid error)";
+}
+
+bool __bch2_err_matches(int err, int class)
+{
+ err = abs(err);
+ class = abs(class);
+
+ BUG_ON(err >= BCH_ERR_MAX);
+ BUG_ON(class >= BCH_ERR_MAX);
+
+ while (err >= BCH_ERR_START && err != class)
+ err = bch2_errcode_parents[err - BCH_ERR_START];
+
+ return err == class;
+}
+
+int __bch2_err_class(int err)
+{
+ err = -err;
+ BUG_ON((unsigned) err >= BCH_ERR_MAX);
+
+ while (err >= BCH_ERR_START && bch2_errcode_parents[err - BCH_ERR_START])
+ err = bch2_errcode_parents[err - BCH_ERR_START];
+
+ return -err;
+}
+
+const char *bch2_blk_status_to_str(blk_status_t status)
+{
+ if (status == BLK_STS_REMOVED)
+ return "device removed";
+ return blk_status_to_str(status);
+}
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
new file mode 100644
index 000000000000..7cc083776a2e
--- /dev/null
+++ b/fs/bcachefs/errcode.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ERRCODE_H
+#define _BCACHEFS_ERRCODE_H
+
+#define BCH_ERRCODES() \
+ x(ENOMEM, ENOMEM_stripe_buf) \
+ x(ENOMEM, ENOMEM_replicas_table) \
+ x(ENOMEM, ENOMEM_cpu_replicas) \
+ x(ENOMEM, ENOMEM_replicas_gc) \
+ x(ENOMEM, ENOMEM_disk_groups_validate) \
+ x(ENOMEM, ENOMEM_disk_groups_to_cpu) \
+ x(ENOMEM, ENOMEM_mark_snapshot) \
+ x(ENOMEM, ENOMEM_mark_stripe) \
+ x(ENOMEM, ENOMEM_mark_stripe_ptr) \
+ x(ENOMEM, ENOMEM_btree_key_cache_create) \
+ x(ENOMEM, ENOMEM_btree_key_cache_fill) \
+ x(ENOMEM, ENOMEM_btree_key_cache_insert) \
+ x(ENOMEM, ENOMEM_trans_kmalloc) \
+ x(ENOMEM, ENOMEM_trans_log_msg) \
+ x(ENOMEM, ENOMEM_do_encrypt) \
+ x(ENOMEM, ENOMEM_ec_read_extent) \
+ x(ENOMEM, ENOMEM_ec_stripe_mem_alloc) \
+ x(ENOMEM, ENOMEM_ec_new_stripe_alloc) \
+ x(ENOMEM, ENOMEM_fs_btree_cache_init) \
+ x(ENOMEM, ENOMEM_fs_btree_key_cache_init) \
+ x(ENOMEM, ENOMEM_fs_counters_init) \
+ x(ENOMEM, ENOMEM_fs_btree_write_buffer_init) \
+ x(ENOMEM, ENOMEM_io_clock_init) \
+ x(ENOMEM, ENOMEM_blacklist_table_init) \
+ x(ENOMEM, ENOMEM_sb_realloc_injected) \
+ x(ENOMEM, ENOMEM_sb_bio_realloc) \
+ x(ENOMEM, ENOMEM_sb_buf_realloc) \
+ x(ENOMEM, ENOMEM_sb_journal_validate) \
+ x(ENOMEM, ENOMEM_sb_journal_v2_validate) \
+ x(ENOMEM, ENOMEM_journal_entry_add) \
+ x(ENOMEM, ENOMEM_journal_read_buf_realloc) \
+ x(ENOMEM, ENOMEM_btree_interior_update_worker_init)\
+ x(ENOMEM, ENOMEM_btree_interior_update_pool_init) \
+ x(ENOMEM, ENOMEM_bio_read_init) \
+ x(ENOMEM, ENOMEM_bio_read_split_init) \
+ x(ENOMEM, ENOMEM_bio_write_init) \
+ x(ENOMEM, ENOMEM_bio_bounce_pages_init) \
+ x(ENOMEM, ENOMEM_writepage_bioset_init) \
+ x(ENOMEM, ENOMEM_dio_read_bioset_init) \
+ x(ENOMEM, ENOMEM_dio_write_bioset_init) \
+ x(ENOMEM, ENOMEM_nocow_flush_bioset_init) \
+ x(ENOMEM, ENOMEM_promote_table_init) \
+ x(ENOMEM, ENOMEM_compression_bounce_read_init) \
+ x(ENOMEM, ENOMEM_compression_bounce_write_init) \
+ x(ENOMEM, ENOMEM_compression_workspace_init) \
+ x(ENOMEM, ENOMEM_decompression_workspace_init) \
+ x(ENOMEM, ENOMEM_bucket_gens) \
+ x(ENOMEM, ENOMEM_buckets_nouse) \
+ x(ENOMEM, ENOMEM_usage_init) \
+ x(ENOMEM, ENOMEM_btree_node_read_all_replicas) \
+ x(ENOMEM, ENOMEM_btree_node_reclaim) \
+ x(ENOMEM, ENOMEM_btree_node_mem_alloc) \
+ x(ENOMEM, ENOMEM_btree_cache_cannibalize_lock) \
+ x(ENOMEM, ENOMEM_buckets_waiting_for_journal_init)\
+ x(ENOMEM, ENOMEM_buckets_waiting_for_journal_set) \
+ x(ENOMEM, ENOMEM_set_nr_journal_buckets) \
+ x(ENOMEM, ENOMEM_dev_journal_init) \
+ x(ENOMEM, ENOMEM_journal_pin_fifo) \
+ x(ENOMEM, ENOMEM_journal_buf) \
+ x(ENOMEM, ENOMEM_gc_start) \
+ x(ENOMEM, ENOMEM_gc_alloc_start) \
+ x(ENOMEM, ENOMEM_gc_reflink_start) \
+ x(ENOMEM, ENOMEM_gc_gens) \
+ x(ENOMEM, ENOMEM_gc_repair_key) \
+ x(ENOMEM, ENOMEM_fsck_extent_ends_at) \
+ x(ENOMEM, ENOMEM_fsck_add_nlink) \
+ x(ENOMEM, ENOMEM_journal_key_insert) \
+ x(ENOMEM, ENOMEM_journal_keys_sort) \
+ x(ENOMEM, ENOMEM_journal_replay) \
+ x(ENOMEM, ENOMEM_read_superblock_clean) \
+ x(ENOMEM, ENOMEM_fs_alloc) \
+ x(ENOMEM, ENOMEM_fs_name_alloc) \
+ x(ENOMEM, ENOMEM_fs_other_alloc) \
+ x(ENOMEM, ENOMEM_dev_alloc) \
+ x(ENOSPC, ENOSPC_disk_reservation) \
+ x(ENOSPC, ENOSPC_bucket_alloc) \
+ x(ENOSPC, ENOSPC_disk_label_add) \
+ x(ENOSPC, ENOSPC_stripe_create) \
+ x(ENOSPC, ENOSPC_inode_create) \
+ x(ENOSPC, ENOSPC_str_hash_create) \
+ x(ENOSPC, ENOSPC_snapshot_create) \
+ x(ENOSPC, ENOSPC_subvolume_create) \
+ x(ENOSPC, ENOSPC_sb) \
+ x(ENOSPC, ENOSPC_sb_journal) \
+ x(ENOSPC, ENOSPC_sb_journal_seq_blacklist) \
+ x(ENOSPC, ENOSPC_sb_quota) \
+ x(ENOSPC, ENOSPC_sb_replicas) \
+ x(ENOSPC, ENOSPC_sb_members) \
+ x(ENOSPC, ENOSPC_sb_members_v2) \
+ x(ENOSPC, ENOSPC_sb_crypt) \
+ x(ENOSPC, ENOSPC_btree_slot) \
+ x(ENOSPC, ENOSPC_snapshot_tree) \
+ x(ENOENT, ENOENT_bkey_type_mismatch) \
+ x(ENOENT, ENOENT_str_hash_lookup) \
+ x(ENOENT, ENOENT_str_hash_set_must_replace) \
+ x(ENOENT, ENOENT_inode) \
+ x(ENOENT, ENOENT_not_subvol) \
+ x(ENOENT, ENOENT_not_directory) \
+ x(ENOENT, ENOENT_directory_dead) \
+ x(ENOENT, ENOENT_subvolume) \
+ x(ENOENT, ENOENT_snapshot_tree) \
+ x(ENOENT, ENOENT_dirent_doesnt_match_inode) \
+ x(ENOENT, ENOENT_dev_not_found) \
+ x(ENOENT, ENOENT_dev_idx_not_found) \
+ x(0, open_buckets_empty) \
+ x(0, freelist_empty) \
+ x(BCH_ERR_freelist_empty, no_buckets_found) \
+ x(0, transaction_restart) \
+ x(BCH_ERR_transaction_restart, transaction_restart_fault_inject) \
+ x(BCH_ERR_transaction_restart, transaction_restart_relock) \
+ x(BCH_ERR_transaction_restart, transaction_restart_relock_path) \
+ x(BCH_ERR_transaction_restart, transaction_restart_relock_path_intent) \
+ x(BCH_ERR_transaction_restart, transaction_restart_relock_after_fill) \
+ x(BCH_ERR_transaction_restart, transaction_restart_too_many_iters) \
+ x(BCH_ERR_transaction_restart, transaction_restart_lock_node_reused) \
+ x(BCH_ERR_transaction_restart, transaction_restart_fill_relock) \
+ x(BCH_ERR_transaction_restart, transaction_restart_fill_mem_alloc_fail)\
+ x(BCH_ERR_transaction_restart, transaction_restart_mem_realloced) \
+ x(BCH_ERR_transaction_restart, transaction_restart_in_traverse_all) \
+ x(BCH_ERR_transaction_restart, transaction_restart_would_deadlock) \
+ x(BCH_ERR_transaction_restart, transaction_restart_would_deadlock_write)\
+ x(BCH_ERR_transaction_restart, transaction_restart_deadlock_recursion_limit)\
+ x(BCH_ERR_transaction_restart, transaction_restart_upgrade) \
+ x(BCH_ERR_transaction_restart, transaction_restart_key_cache_upgrade) \
+ x(BCH_ERR_transaction_restart, transaction_restart_key_cache_fill) \
+ x(BCH_ERR_transaction_restart, transaction_restart_key_cache_raced) \
+ x(BCH_ERR_transaction_restart, transaction_restart_key_cache_realloced)\
+ x(BCH_ERR_transaction_restart, transaction_restart_journal_preres_get) \
+ x(BCH_ERR_transaction_restart, transaction_restart_split_race) \
+ x(BCH_ERR_transaction_restart, transaction_restart_write_buffer_flush) \
+ x(BCH_ERR_transaction_restart, transaction_restart_nested) \
+ x(0, no_btree_node) \
+ x(BCH_ERR_no_btree_node, no_btree_node_relock) \
+ x(BCH_ERR_no_btree_node, no_btree_node_upgrade) \
+ x(BCH_ERR_no_btree_node, no_btree_node_drop) \
+ x(BCH_ERR_no_btree_node, no_btree_node_lock_root) \
+ x(BCH_ERR_no_btree_node, no_btree_node_up) \
+ x(BCH_ERR_no_btree_node, no_btree_node_down) \
+ x(BCH_ERR_no_btree_node, no_btree_node_init) \
+ x(BCH_ERR_no_btree_node, no_btree_node_cached) \
+ x(BCH_ERR_no_btree_node, no_btree_node_srcu_reset) \
+ x(0, btree_insert_fail) \
+ x(BCH_ERR_btree_insert_fail, btree_insert_btree_node_full) \
+ x(BCH_ERR_btree_insert_fail, btree_insert_need_mark_replicas) \
+ x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_res) \
+ x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_reclaim) \
+ x(BCH_ERR_btree_insert_fail, btree_insert_need_flush_buffer) \
+ x(0, backpointer_to_overwritten_btree_node) \
+ x(0, lock_fail_root_changed) \
+ x(0, journal_reclaim_would_deadlock) \
+ x(EINVAL, fsck) \
+ x(BCH_ERR_fsck, fsck_fix) \
+ x(BCH_ERR_fsck, fsck_ignore) \
+ x(BCH_ERR_fsck, fsck_errors_not_fixed) \
+ x(BCH_ERR_fsck, fsck_repair_unimplemented) \
+ x(BCH_ERR_fsck, fsck_repair_impossible) \
+ x(0, restart_recovery) \
+ x(0, unwritten_extent_update) \
+ x(EINVAL, device_state_not_allowed) \
+ x(EINVAL, member_info_missing) \
+ x(EINVAL, mismatched_block_size) \
+ x(EINVAL, block_size_too_small) \
+ x(EINVAL, bucket_size_too_small) \
+ x(EINVAL, device_size_too_small) \
+ x(EINVAL, device_not_a_member_of_filesystem) \
+ x(EINVAL, device_has_been_removed) \
+ x(EINVAL, device_already_online) \
+ x(EINVAL, insufficient_devices_to_start) \
+ x(EINVAL, invalid) \
+ x(EINVAL, internal_fsck_err) \
+ x(EROFS, erofs_trans_commit) \
+ x(EROFS, erofs_no_writes) \
+ x(EROFS, erofs_journal_err) \
+ x(EROFS, erofs_sb_err) \
+ x(EROFS, erofs_unfixed_errors) \
+ x(EROFS, erofs_norecovery) \
+ x(EROFS, erofs_nochanges) \
+ x(EROFS, insufficient_devices) \
+ x(0, operation_blocked) \
+ x(BCH_ERR_operation_blocked, btree_cache_cannibalize_lock_blocked) \
+ x(BCH_ERR_operation_blocked, journal_res_get_blocked) \
+ x(BCH_ERR_operation_blocked, journal_preres_get_blocked) \
+ x(BCH_ERR_operation_blocked, bucket_alloc_blocked) \
+ x(BCH_ERR_operation_blocked, stripe_alloc_blocked) \
+ x(BCH_ERR_invalid, invalid_sb) \
+ x(BCH_ERR_invalid_sb, invalid_sb_magic) \
+ x(BCH_ERR_invalid_sb, invalid_sb_version) \
+ x(BCH_ERR_invalid_sb, invalid_sb_features) \
+ x(BCH_ERR_invalid_sb, invalid_sb_too_big) \
+ x(BCH_ERR_invalid_sb, invalid_sb_csum_type) \
+ x(BCH_ERR_invalid_sb, invalid_sb_csum) \
+ x(BCH_ERR_invalid_sb, invalid_sb_block_size) \
+ x(BCH_ERR_invalid_sb, invalid_sb_uuid) \
+ x(BCH_ERR_invalid_sb, invalid_sb_too_many_members) \
+ x(BCH_ERR_invalid_sb, invalid_sb_dev_idx) \
+ x(BCH_ERR_invalid_sb, invalid_sb_time_precision) \
+ x(BCH_ERR_invalid_sb, invalid_sb_field_size) \
+ x(BCH_ERR_invalid_sb, invalid_sb_layout) \
+ x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_type) \
+ x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_nr_superblocks) \
+ x(BCH_ERR_invalid_sb_layout, invalid_sb_layout_superblocks_overlap) \
+ x(BCH_ERR_invalid_sb, invalid_sb_members_missing) \
+ x(BCH_ERR_invalid_sb, invalid_sb_members) \
+ x(BCH_ERR_invalid_sb, invalid_sb_disk_groups) \
+ x(BCH_ERR_invalid_sb, invalid_sb_replicas) \
+ x(BCH_ERR_invalid_sb, invalid_sb_journal) \
+ x(BCH_ERR_invalid_sb, invalid_sb_journal_seq_blacklist) \
+ x(BCH_ERR_invalid_sb, invalid_sb_crypt) \
+ x(BCH_ERR_invalid_sb, invalid_sb_clean) \
+ x(BCH_ERR_invalid_sb, invalid_sb_quota) \
+ x(BCH_ERR_invalid, invalid_bkey) \
+ x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
+ x(EIO, btree_node_read_err) \
+ x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \
+ x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \
+ x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \
+ x(BCH_ERR_btree_node_read_err, btree_node_read_err_bad_node) \
+ x(BCH_ERR_btree_node_read_err, btree_node_read_err_incompatible) \
+ x(0, nopromote) \
+ x(BCH_ERR_nopromote, nopromote_may_not) \
+ x(BCH_ERR_nopromote, nopromote_already_promoted) \
+ x(BCH_ERR_nopromote, nopromote_unwritten) \
+ x(BCH_ERR_nopromote, nopromote_congested) \
+ x(BCH_ERR_nopromote, nopromote_in_flight) \
+ x(BCH_ERR_nopromote, nopromote_enomem)
+
+enum bch_errcode {
+ BCH_ERR_START = 2048,
+#define x(class, err) BCH_ERR_##err,
+ BCH_ERRCODES()
+#undef x
+ BCH_ERR_MAX
+};
+
+const char *bch2_err_str(int);
+bool __bch2_err_matches(int, int);
+
+static inline bool _bch2_err_matches(int err, int class)
+{
+ return err < 0 && __bch2_err_matches(err, class);
+}
+
+#define bch2_err_matches(_err, _class) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(_class)); \
+ unlikely(_bch2_err_matches(_err, _class)); \
+})
+
+int __bch2_err_class(int);
+
+static inline long bch2_err_class(long err)
+{
+ return err < 0 ? __bch2_err_class(err) : err;
+}
+
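+/*
+ * Editorial sketch (an illustration, not part of the patch): private error
+ * codes start at BCH_ERR_START (2048), above the errno range, so they can't
+ * collide with standard errnos. bch2_err_matches() walks the x(class, err)
+ * parent chain declared in BCH_ERRCODES(), so callers can test for a whole
+ * class at once:
+ *
+ *	ret = bch2_btree_iter_traverse(iter);
+ *	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ *		goto retry;
+ *
+ * Before an error escapes bcachefs, bch2_err_class() folds a private code
+ * back down to the errno at the root of its class chain.
+ */
+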
+#define BLK_STS_REMOVED ((__force blk_status_t)128)
+
+const char *bch2_blk_status_to_str(blk_status_t);
+
+#endif /* _BCACHEFS_ERRCODE_H */
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
new file mode 100644
index 000000000000..2a5af8872613
--- /dev/null
+++ b/fs/bcachefs/error.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "error.h"
+#include "super.h"
+
+#define FSCK_ERR_RATELIMIT_NR 10
+
+bool bch2_inconsistent_error(struct bch_fs *c)
+{
+ set_bit(BCH_FS_ERROR, &c->flags);
+
+ switch (c->opts.errors) {
+ case BCH_ON_ERROR_continue:
+ return false;
+ case BCH_ON_ERROR_ro:
+ if (bch2_fs_emergency_read_only(c))
+ bch_err(c, "inconsistency detected - emergency read only");
+ return true;
+ case BCH_ON_ERROR_panic:
+ panic(bch2_fmt(c, "panic after error"));
+ return true;
+ default:
+ BUG();
+ }
+}
+
+void bch2_topology_error(struct bch_fs *c)
+{
+ set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags);
+ if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
+ bch2_inconsistent_error(c);
+}
+
+void bch2_fatal_error(struct bch_fs *c)
+{
+ if (bch2_fs_emergency_read_only(c))
+ bch_err(c, "fatal error - emergency read only");
+}
+
+void bch2_io_error_work(struct work_struct *work)
+{
+ struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work);
+ struct bch_fs *c = ca->fs;
+ bool dev;
+
+ down_write(&c->state_lock);
+ dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
+ BCH_FORCE_IF_DEGRADED);
+ if (dev
+ ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
+ BCH_FORCE_IF_DEGRADED)
+ : bch2_fs_emergency_read_only(c))
+ bch_err(ca,
+ "too many IO errors, setting %s RO",
+ dev ? "device" : "filesystem");
+ up_write(&c->state_lock);
+}
+
+void bch2_io_error(struct bch_dev *ca)
+{
+ //queue_work(system_long_wq, &ca->io_error_work);
+}
+
+enum ask_yn {
+ YN_NO,
+ YN_YES,
+ YN_ALLNO,
+ YN_ALLYES,
+};
+
+#ifdef __KERNEL__
+#define bch2_fsck_ask_yn() YN_NO
+#else
+
+#include "tools-util.h"
+
+enum ask_yn bch2_fsck_ask_yn(void)
+{
+	char *buf = NULL;
+	size_t buflen = 0;
+	enum ask_yn ret;
+
+	while (true) {
+		fputs(" (y,n, or Y,N for all errors of this type) ", stdout);
+		fflush(stdout);
+
+		if (getline(&buf, &buflen, stdin) < 0)
+			die("error reading from standard input");
+
+		strim(buf);
+		if (strlen(buf) != 1)
+			continue;
+
+		switch (buf[0]) {
+		case 'n':
+			ret = YN_NO;
+			goto found;
+		case 'y':
+			ret = YN_YES;
+			goto found;
+		case 'N':
+			ret = YN_ALLNO;
+			goto found;
+		case 'Y':
+			ret = YN_ALLYES;
+			goto found;
+		}
+	}
+found:
+	/* single exit point, so getline()'s buffer is always freed: */
+	free(buf);
+	return ret;
+}
+
+#endif
+
+static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
+{
+ struct fsck_err_state *s;
+
+ if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
+ return NULL;
+
+ list_for_each_entry(s, &c->fsck_errors, list)
+ if (s->fmt == fmt) {
+ /*
+ * move it to the head of the list: repeated fsck errors
+ * are common
+ */
+ list_move(&s->list, &c->fsck_errors);
+ return s;
+ }
+
+ s = kzalloc(sizeof(*s), GFP_NOFS);
+ if (!s) {
+ if (!c->fsck_alloc_err)
+ bch_err(c, "kmalloc err, cannot ratelimit fsck errs");
+ c->fsck_alloc_err = true;
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&s->list);
+ s->fmt = fmt;
+ list_add(&s->list, &c->fsck_errors);
+ return s;
+}
+
+int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
+{
+ struct fsck_err_state *s = NULL;
+ va_list args;
+ bool print = true, suppressing = false, inconsistent = false;
+ struct printbuf buf = PRINTBUF, *out = &buf;
+ int ret = -BCH_ERR_fsck_ignore;
+
+ va_start(args, fmt);
+ prt_vprintf(out, fmt, args);
+ va_end(args);
+
+ mutex_lock(&c->fsck_error_lock);
+ s = fsck_err_get(c, fmt);
+ if (s) {
+ /*
+ * We may be called multiple times for the same error on
+ * transaction restart - this memoizes instead of asking the user
+ * multiple times for the same error:
+ */
+ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
+ ret = s->ret;
+ mutex_unlock(&c->fsck_error_lock);
+ printbuf_exit(&buf);
+ return ret;
+ }
+
+ kfree(s->last_msg);
+ s->last_msg = kstrdup(buf.buf, GFP_KERNEL);
+
+		if (c->opts.ratelimit_errors &&
+		    !(flags & FSCK_NO_RATELIMIT) &&
+		    s->nr >= FSCK_ERR_RATELIMIT_NR) {
+			if (s->nr == FSCK_ERR_RATELIMIT_NR)
+				suppressing = true;
+			else
+				print = false;
+
+			/* flag so bch2_flush_fsck_errs() prints a summary: */
+			s->ratelimited = true;
+		}
+
+ s->nr++;
+ }
+
+#ifdef BCACHEFS_LOG_PREFIX
+ if (!strncmp(fmt, "bcachefs:", 9))
+ prt_printf(out, bch2_log_msg(c, ""));
+#endif
+
+ if (test_bit(BCH_FS_FSCK_DONE, &c->flags)) {
+ if (c->opts.errors != BCH_ON_ERROR_continue ||
+ !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
+ prt_str(out, ", shutting down");
+ inconsistent = true;
+ ret = -BCH_ERR_fsck_errors_not_fixed;
+ } else if (flags & FSCK_CAN_FIX) {
+ prt_str(out, ", fixing");
+ ret = -BCH_ERR_fsck_fix;
+ } else {
+ prt_str(out, ", continuing");
+ ret = -BCH_ERR_fsck_ignore;
+ }
+ } else if (c->opts.fix_errors == FSCK_FIX_exit) {
+ prt_str(out, ", exiting");
+ ret = -BCH_ERR_fsck_errors_not_fixed;
+ } else if (flags & FSCK_CAN_FIX) {
+ int fix = s && s->fix
+ ? s->fix
+ : c->opts.fix_errors;
+
+ if (fix == FSCK_FIX_ask) {
+ int ask;
+
+ prt_str(out, ": fix?");
+ bch2_print_string_as_lines(KERN_ERR, out->buf);
+ print = false;
+
+ ask = bch2_fsck_ask_yn();
+
+ if (ask >= YN_ALLNO && s)
+ s->fix = ask == YN_ALLNO
+ ? FSCK_FIX_no
+ : FSCK_FIX_yes;
+
+ ret = ask & 1
+ ? -BCH_ERR_fsck_fix
+ : -BCH_ERR_fsck_ignore;
+ } else if (fix == FSCK_FIX_yes ||
+ (c->opts.nochanges &&
+ !(flags & FSCK_CAN_IGNORE))) {
+ prt_str(out, ", fixing");
+ ret = -BCH_ERR_fsck_fix;
+ } else {
+ prt_str(out, ", not fixing");
+ }
+ } else if (flags & FSCK_NEED_FSCK) {
+ prt_str(out, " (run fsck to correct)");
+ } else {
+ prt_str(out, " (repair unimplemented)");
+ }
+
+ if (ret == -BCH_ERR_fsck_ignore &&
+ (c->opts.fix_errors == FSCK_FIX_exit ||
+ !(flags & FSCK_CAN_IGNORE)))
+ ret = -BCH_ERR_fsck_errors_not_fixed;
+
+ if (print)
+ bch2_print_string_as_lines(KERN_ERR, out->buf);
+
+ if (!test_bit(BCH_FS_FSCK_DONE, &c->flags) &&
+ (ret != -BCH_ERR_fsck_fix &&
+ ret != -BCH_ERR_fsck_ignore))
+ bch_err(c, "Unable to continue, halting");
+ else if (suppressing)
+ bch_err(c, "Ratelimiting new instances of previous error");
+
+ if (s)
+ s->ret = ret;
+
+ mutex_unlock(&c->fsck_error_lock);
+
+ printbuf_exit(&buf);
+
+ if (inconsistent)
+ bch2_inconsistent_error(c);
+
+ if (ret == -BCH_ERR_fsck_fix) {
+ set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
+ } else {
+ set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
+ set_bit(BCH_FS_ERROR, &c->flags);
+ }
+
+ return ret;
+}
+
+void bch2_flush_fsck_errs(struct bch_fs *c)
+{
+ struct fsck_err_state *s, *n;
+
+ mutex_lock(&c->fsck_error_lock);
+
+ list_for_each_entry_safe(s, n, &c->fsck_errors, list) {
+ if (s->ratelimited && s->last_msg)
+ bch_err(c, "Saw %llu errors like:\n %s", s->nr, s->last_msg);
+
+ list_del(&s->list);
+ kfree(s->last_msg);
+ kfree(s);
+ }
+
+ mutex_unlock(&c->fsck_error_lock);
+}
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
new file mode 100644
index 000000000000..7ce9540052e5
--- /dev/null
+++ b/fs/bcachefs/error.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ERROR_H
+#define _BCACHEFS_ERROR_H
+
+#include <linux/list.h>
+#include <linux/printk.h>
+
+struct bch_dev;
+struct bch_fs;
+struct work_struct;
+
+/*
+ * XXX: separate out errors that indicate on disk data is inconsistent, and flag
+ * superblock as such
+ */
+
+/* Error messages: */
+
+/*
+ * Inconsistency errors: The on disk data is inconsistent. If these occur during
+ * initial recovery, they don't indicate a bug in the running code - we walk all
+ * the metadata before modifying anything. If they occur at runtime, they
+ * indicate either a bug in the running code or (less likely) data is being
+ * silently corrupted under us.
+ *
+ * XXX: audit all inconsistent errors and make sure they're all recoverable, in
+ * BCH_ON_ERROR_CONTINUE mode
+ */
+
+bool bch2_inconsistent_error(struct bch_fs *);
+
+void bch2_topology_error(struct bch_fs *);
+
+#define bch2_fs_inconsistent(c, ...) \
+({ \
+ bch_err(c, __VA_ARGS__); \
+ bch2_inconsistent_error(c); \
+})
+
+#define bch2_fs_inconsistent_on(cond, c, ...) \
+({ \
+ bool _ret = unlikely(!!(cond)); \
+ \
+ if (_ret) \
+ bch2_fs_inconsistent(c, __VA_ARGS__); \
+ _ret; \
+})
+
+/*
+ * Later we might want to mark only the particular device inconsistent, not the
+ * entire filesystem:
+ */
+
+#define bch2_dev_inconsistent(ca, ...) \
+do { \
+ bch_err(ca, __VA_ARGS__); \
+ bch2_inconsistent_error((ca)->fs); \
+} while (0)
+
+#define bch2_dev_inconsistent_on(cond, ca, ...) \
+({ \
+ bool _ret = unlikely(!!(cond)); \
+ \
+ if (_ret) \
+ bch2_dev_inconsistent(ca, __VA_ARGS__); \
+ _ret; \
+})
+
+/*
+ * When a transaction update discovers or is causing a fs inconsistency, it's
+ * helpful to also dump the pending updates:
+ */
+#define bch2_trans_inconsistent(trans, ...) \
+({ \
+ bch_err(trans->c, __VA_ARGS__); \
+ bch2_dump_trans_updates(trans); \
+ bch2_inconsistent_error(trans->c); \
+})
+
+#define bch2_trans_inconsistent_on(cond, trans, ...) \
+({ \
+ bool _ret = unlikely(!!(cond)); \
+ \
+ if (_ret) \
+ bch2_trans_inconsistent(trans, __VA_ARGS__); \
+ _ret; \
+})
+
+/*
+ * Fsck errors: inconsistency errors we detect at mount time and should
+ * ideally be able to repair:
+ */
+
+struct fsck_err_state {
+ struct list_head list;
+ const char *fmt;
+ u64 nr;
+ bool ratelimited;
+ int ret;
+ int fix;
+ char *last_msg;
+};
+
+#define FSCK_CAN_FIX (1 << 0)
+#define FSCK_CAN_IGNORE (1 << 1)
+#define FSCK_NEED_FSCK (1 << 2)
+#define FSCK_NO_RATELIMIT (1 << 3)
+
+__printf(3, 4) __cold
+int bch2_fsck_err(struct bch_fs *, unsigned, const char *, ...);
+void bch2_flush_fsck_errs(struct bch_fs *);
+
+#define __fsck_err(c, _flags, msg, ...) \
+({ \
+ int _ret = bch2_fsck_err(c, _flags, msg, ##__VA_ARGS__); \
+ \
+ if (_ret != -BCH_ERR_fsck_fix && \
+ _ret != -BCH_ERR_fsck_ignore) { \
+ ret = _ret; \
+ goto fsck_err; \
+ } \
+ \
+ _ret == -BCH_ERR_fsck_fix; \
+})
+
+/* These macros return true if error should be fixed: */
+
+/* XXX: mark in superblock that filesystem contains errors, if we ignore: */
+
+#define __fsck_err_on(cond, c, _flags, ...) \
+ (unlikely(cond) ? __fsck_err(c, _flags, ##__VA_ARGS__) : false)
+
+#define need_fsck_err_on(cond, c, ...) \
+ __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__)
+
+#define need_fsck_err(c, ...) \
+ __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__)
+
+#define mustfix_fsck_err(c, ...) \
+ __fsck_err(c, FSCK_CAN_FIX, ##__VA_ARGS__)
+
+#define mustfix_fsck_err_on(cond, c, ...) \
+ __fsck_err_on(cond, c, FSCK_CAN_FIX, ##__VA_ARGS__)
+
+#define fsck_err(c, ...) \
+ __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__)
+
+#define fsck_err_on(cond, c, ...) \
+ __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__)
+
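+/*
+ * Usage sketch (editorial, with hypothetical helpers): these macros expand
+ * to a statement expression that evaluates true if the error should be
+ * fixed, and they require an `int ret` and a `fsck_err:` label in the
+ * enclosing function, jumped to when fsck can't continue:
+ *
+ *	int check_inode(struct bch_fs *c, u64 inum)
+ *	{
+ *		int ret = 0;
+ *
+ *		if (fsck_err_on(!inode_is_reachable(c, inum), c,
+ *				"unreachable inode %llu", inum))
+ *			ret = delete_unreachable_inode(c, inum);
+ *	fsck_err:
+ *		return ret;
+ *	}
+ *
+ * inode_is_reachable() and delete_unreachable_inode() are made-up helpers
+ * for illustration.
+ */
+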
+/*
+ * Fatal errors: these don't indicate a bug, but we can't continue running in RW
+ * mode - pretty much just due to metadata IO errors:
+ */
+
+void bch2_fatal_error(struct bch_fs *);
+
+#define bch2_fs_fatal_error(c, ...) \
+do { \
+ bch_err(c, __VA_ARGS__); \
+ bch2_fatal_error(c); \
+} while (0)
+
+#define bch2_fs_fatal_err_on(cond, c, ...) \
+({ \
+ bool _ret = unlikely(!!(cond)); \
+ \
+ if (_ret) \
+ bch2_fs_fatal_error(c, __VA_ARGS__); \
+ _ret; \
+})
+
+/*
+ * IO errors: either recoverable metadata IO (because we have replicas), or data
+ * IO - we need to log it and print out a message, but we don't (necessarily)
+ * want to shut down the fs:
+ */
+
+void bch2_io_error_work(struct work_struct *);
+
+/* Does the error handling without logging a message */
+void bch2_io_error(struct bch_dev *);
+
+#define bch2_dev_io_err_on(cond, ca, ...) \
+({ \
+ bool _ret = (cond); \
+ \
+ if (_ret) { \
+ bch_err_dev_ratelimited(ca, __VA_ARGS__); \
+ bch2_io_error(ca); \
+ } \
+ _ret; \
+})
+
+#define bch2_dev_inum_io_err_on(cond, ca, ...) \
+({ \
+ bool _ret = (cond); \
+ \
+ if (_ret) { \
+ bch_err_inum_offset_ratelimited(ca, __VA_ARGS__); \
+ bch2_io_error(ca); \
+ } \
+ _ret; \
+})
+
+#endif /* _BCACHEFS_ERROR_H */
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
new file mode 100644
index 000000000000..21af6fb8cecf
--- /dev/null
+++ b/fs/bcachefs/extent_update.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "debug.h"
+#include "extents.h"
+#include "extent_update.h"
+
+/*
+ * This counts the number of iterators into the alloc & ec btrees we'll need
+ * for inserting/removing this extent:
+ */
+static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ unsigned ret = 0, lru = 0;
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ switch (__extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ /* Might also be updating LRU btree */
+ if (entry->ptr.cached)
+ lru++;
+
+ fallthrough;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ ret++;
+ }
+ }
+
+ /*
+ * Updating keys in the alloc btree may also update keys in the
+ * freespace or discard btrees:
+ */
+ return lru + ret * 2;
+}
+
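+/*
+ * Worked example (editorial): an extent with two pointers, one of them
+ * cached, plus one stripe pointer gives ret = 3 and lru = 1 above, so we
+ * budget 1 + 3 * 2 = 7 iterators for the alloc/freespace/discard/LRU
+ * updates this extent may trigger.
+ */
+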
+static int count_iters_for_insert(struct btree_trans *trans,
+ struct bkey_s_c k,
+ unsigned offset,
+ struct bpos *end,
+ unsigned *nr_iters,
+ unsigned max_iters)
+{
+ int ret = 0, ret2 = 0;
+
+ if (*nr_iters >= max_iters) {
+ *end = bpos_min(*end, k.k->p);
+ ret = 1;
+ }
+
+ switch (k.k->type) {
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ *nr_iters += bch2_bkey_nr_alloc_ptrs(k);
+
+ if (*nr_iters >= max_iters) {
+ *end = bpos_min(*end, k.k->p);
+ ret = 1;
+ }
+
+ break;
+ case KEY_TYPE_reflink_p: {
+ struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
+ u64 idx = le64_to_cpu(p.v->idx);
+ unsigned sectors = bpos_min(*end, p.k->p).offset -
+ bkey_start_offset(p.k);
+ struct btree_iter iter;
+ struct bkey_s_c r_k;
+
+ for_each_btree_key_norestart(trans, iter,
+ BTREE_ID_reflink, POS(0, idx + offset),
+ BTREE_ITER_SLOTS, r_k, ret2) {
+ if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
+ break;
+
+ /* extent_update_to_keys(), for the reflink_v update */
+ *nr_iters += 1;
+
+ *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);
+
+ if (*nr_iters >= max_iters) {
+ struct bpos pos = bkey_start_pos(k.k);
+ pos.offset += min_t(u64, k.k->size,
+ r_k.k->p.offset - idx);
+
+ *end = bpos_min(*end, pos);
+ ret = 1;
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ break;
+ }
+ }
+
+ return ret2 ?: ret;
+}
+
+#define EXTENT_ITERS_MAX (BTREE_ITER_MAX / 3)
+
+int bch2_extent_atomic_end(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *insert,
+ struct bpos *end)
+{
+ struct btree_iter copy;
+ struct bkey_s_c k;
+ unsigned nr_iters = 0;
+ int ret;
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ *end = insert->k.p;
+
+ /* extent_update_to_keys(): */
+ nr_iters += 1;
+
+ ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
+ &nr_iters, EXTENT_ITERS_MAX / 2);
+ if (ret < 0)
+ return ret;
+
+ bch2_trans_copy_iter(&copy, iter);
+
+ for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) {
+ unsigned offset = 0;
+
+ if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
+ offset = bkey_start_offset(&insert->k) -
+ bkey_start_offset(k.k);
+
+ /* extent_handle_overwrites(): */
+ switch (bch2_extent_overlap(&insert->k, k.k)) {
+ case BCH_EXTENT_OVERLAP_ALL:
+ case BCH_EXTENT_OVERLAP_FRONT:
+ nr_iters += 1;
+ break;
+ case BCH_EXTENT_OVERLAP_BACK:
+ case BCH_EXTENT_OVERLAP_MIDDLE:
+ nr_iters += 2;
+ break;
+ }
+
+ ret = count_iters_for_insert(trans, k, offset, end,
+ &nr_iters, EXTENT_ITERS_MAX);
+ if (ret)
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &copy);
+ return ret < 0 ? ret : 0;
+}
+
+int bch2_extent_trim_atomic(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *k)
+{
+ struct bpos end;
+ int ret;
+
+ ret = bch2_extent_atomic_end(trans, iter, k, &end);
+ if (ret)
+ return ret;
+
+ bch2_cut_back(end, k);
+ return 0;
+}
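+
+/*
+ * Usage sketch (editorial): an extents update path trims its insert so a
+ * single transaction commit never needs more iterators than BTREE_ITER_MAX
+ * allows, then loops to insert the remainder:
+ *
+ *	ret = bch2_extent_trim_atomic(trans, iter, k);
+ *	if (ret)
+ *		return ret;
+ *
+ * (k->k.p may now be smaller than the caller originally asked for.)
+ */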
diff --git a/fs/bcachefs/extent_update.h b/fs/bcachefs/extent_update.h
new file mode 100644
index 000000000000..6f5cf449361a
--- /dev/null
+++ b/fs/bcachefs/extent_update.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EXTENT_UPDATE_H
+#define _BCACHEFS_EXTENT_UPDATE_H
+
+#include "bcachefs.h"
+
+int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, struct bpos *);
+int bch2_extent_trim_atomic(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *);
+
+#endif /* _BCACHEFS_EXTENT_UPDATE_H */
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
new file mode 100644
index 000000000000..1b25f84e4b9c
--- /dev/null
+++ b/fs/bcachefs/extents.c
@@ -0,0 +1,1403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
+ *
+ * Code for managing the extent btree and dynamically updating the writeback
+ * dirty sector count.
+ */
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "btree_gc.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "debug.h"
+#include "disk_groups.h"
+#include "error.h"
+#include "extents.h"
+#include "inode.h"
+#include "journal.h"
+#include "replicas.h"
+#include "super.h"
+#include "super-io.h"
+#include "trace.h"
+#include "util.h"
+
+static unsigned bch2_crc_field_size_max[] = {
+ [BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
+ [BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
+};
+
+static void bch2_extent_crc_pack(union bch_extent_crc *,
+ struct bch_extent_crc_unpacked,
+ enum bch_extent_entry_type);
+
+static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
+ unsigned dev)
+{
+ struct bch_dev_io_failures *i;
+
+ for (i = f->devs; i < f->devs + f->nr; i++)
+ if (i->dev == dev)
+ return i;
+
+ return NULL;
+}
+
+void bch2_mark_io_failure(struct bch_io_failures *failed,
+ struct extent_ptr_decoded *p)
+{
+ struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);
+
+ if (!f) {
+ BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
+
+ f = &failed->devs[failed->nr++];
+ f->dev = p->ptr.dev;
+ f->idx = p->idx;
+ f->nr_failed = 1;
+ f->nr_retries = 0;
+ } else if (p->idx != f->idx) {
+ f->idx = p->idx;
+ f->nr_failed = 1;
+ f->nr_retries = 0;
+ } else {
+ f->nr_failed++;
+ }
+}
+
+/*
+ * returns true if p1 is better than p2:
+ */
+static inline bool ptr_better(struct bch_fs *c,
+ const struct extent_ptr_decoded p1,
+ const struct extent_ptr_decoded p2)
+{
+ if (likely(!p1.idx && !p2.idx)) {
+ struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
+ struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
+
+ u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
+ u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
+
+ /* Pick at random, biased in favor of the faster device: */
+
+ return bch2_rand_range(l1 + l2) > l1;
+ }
+
+ if (bch2_force_reconstruct_read)
+ return p1.idx > p2.idx;
+
+ return p1.idx < p2.idx;
+}
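+
+/*
+ * Editorial note: with measured read latencies l1 and l2, p1 wins with
+ * probability roughly l2 / (l1 + l2) - e.g. l1 = 1ms vs l2 = 3ms sends
+ * about 3/4 of reads to the faster device.
+ */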
+
+/*
+ * This picks a non-stale pointer to read from, preferring replica indexes
+ * that haven't already failed according to @failed. @failed may be NULL, in
+ * which case any readable pointer is acceptable.
+ */
+int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_io_failures *failed,
+ struct extent_ptr_decoded *pick)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct bch_dev_io_failures *f;
+ struct bch_dev *ca;
+ int ret = 0;
+
+ if (k.k->type == KEY_TYPE_error)
+ return -EIO;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ /*
+ * Unwritten extent: no need to actually read, treat it as a
+ * hole and return 0s:
+ */
+ if (p.ptr.unwritten)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, p.ptr.dev);
+
+ /*
+ * If there are any dirty pointers it's an error if we can't
+ * read:
+ */
+ if (!ret && !p.ptr.cached)
+ ret = -EIO;
+
+ if (p.ptr.cached && ptr_stale(ca, &p.ptr))
+ continue;
+
+ f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
+ if (f)
+ p.idx = f->nr_failed < f->nr_retries
+ ? f->idx
+ : f->idx + 1;
+
+ if (!p.idx &&
+ !bch2_dev_is_readable(ca))
+ p.idx++;
+
+ if (bch2_force_reconstruct_read &&
+ !p.idx && p.has_ec)
+ p.idx++;
+
+ if (p.idx >= (unsigned) p.has_ec + 1)
+ continue;
+
+ if (ret > 0 && !ptr_better(c, p, *pick))
+ continue;
+
+ *pick = p;
+ ret = 1;
+ }
+
+ return ret;
+}
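+
+/*
+ * Editorial sketch of the caller contract: a return of 1 means *pick is
+ * valid, 0 means the extent is a hole or unwritten (read as zeroes), and
+ * -EIO means dirty pointers exist but none are currently readable:
+ *
+ *	ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
+ *	if (ret < 0)
+ *		return ret;
+ *	if (!ret)
+ *		zero_fill_bio(bio);
+ *	else
+ *		submit_read(&pick, bio);
+ *
+ * submit_read() here is a made-up helper; zero_fill_bio() is the stock
+ * block layer helper.
+ */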
+
+/* KEY_TYPE_btree_ptr: */
+
+int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
+ prt_printf(err, "value too big (%zu > %u)",
+ bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return bch2_bkey_ptrs_invalid(c, k, flags, err);
+}
+
+void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
+
+int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
+ prt_printf(err, "value too big (%zu > %zu)",
+ bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return bch2_bkey_ptrs_invalid(c, k, flags, err);
+}
+
+void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
+
+ prt_printf(out, "seq %llx written %u min_key %s",
+ le64_to_cpu(bp.v->seq),
+ le16_to_cpu(bp.v->sectors_written),
+ BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");
+
+ bch2_bpos_to_text(out, bp.v->min_key);
+ prt_printf(out, " ");
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
+
+void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
+ unsigned big_endian, int write,
+ struct bkey_s k)
+{
+ struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);
+
+ compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);
+
+ if (version < bcachefs_metadata_version_inode_btree_change &&
+ btree_id_is_extents(btree_id) &&
+ !bkey_eq(bp.v->min_key, POS_MIN))
+ bp.v->min_key = write
+ ? bpos_nosnap_predecessor(bp.v->min_key)
+ : bpos_nosnap_successor(bp.v->min_key);
+}
+
+/* KEY_TYPE_extent: */
+
+bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
+{
+ struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
+ struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
+ union bch_extent_entry *en_l;
+ const union bch_extent_entry *en_r;
+ struct extent_ptr_decoded lp, rp;
+ bool use_right_ptr;
+ struct bch_dev *ca;
+
+ en_l = l_ptrs.start;
+ en_r = r_ptrs.start;
+ while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
+ if (extent_entry_type(en_l) != extent_entry_type(en_r))
+ return false;
+
+ en_l = extent_entry_next(en_l);
+ en_r = extent_entry_next(en_r);
+ }
+
+ if (en_l < l_ptrs.end || en_r < r_ptrs.end)
+ return false;
+
+ en_l = l_ptrs.start;
+ en_r = r_ptrs.start;
+ lp.crc = bch2_extent_crc_unpack(l.k, NULL);
+ rp.crc = bch2_extent_crc_unpack(r.k, NULL);
+
+ while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
+ __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
+ if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
+ rp.ptr.offset + rp.crc.offset ||
+ lp.ptr.dev != rp.ptr.dev ||
+ lp.ptr.gen != rp.ptr.gen ||
+ lp.ptr.unwritten != rp.ptr.unwritten ||
+ lp.has_ec != rp.has_ec)
+ return false;
+
+ /* Extents may not straddle buckets: */
+ ca = bch_dev_bkey_exists(c, lp.ptr.dev);
+ if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
+ return false;
+
+		/* lp.has_ec == rp.has_ec was already checked above */
+		if (lp.has_ec &&
+		    (lp.ec.block != rp.ec.block ||
+		     lp.ec.redundancy != rp.ec.redundancy ||
+		     lp.ec.idx != rp.ec.idx))
+			return false;
+
+ if (lp.crc.compression_type != rp.crc.compression_type ||
+ lp.crc.nonce != rp.crc.nonce)
+ return false;
+
+ if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
+ lp.crc.uncompressed_size) {
+ /* can use left extent's crc entry */
+ } else if (lp.crc.live_size <= rp.crc.offset) {
+ /* can use right extent's crc entry */
+ } else {
+ /* check if checksums can be merged: */
+ if (lp.crc.csum_type != rp.crc.csum_type ||
+ lp.crc.nonce != rp.crc.nonce ||
+ crc_is_compressed(lp.crc) ||
+ !bch2_checksum_mergeable(lp.crc.csum_type))
+ return false;
+
+ if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
+ rp.crc.offset)
+ return false;
+
+ if (lp.crc.csum_type &&
+ lp.crc.uncompressed_size +
+ rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
+ return false;
+ }
+
+ en_l = extent_entry_next(en_l);
+ en_r = extent_entry_next(en_r);
+ }
+
+ en_l = l_ptrs.start;
+ en_r = r_ptrs.start;
+ while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
+ if (extent_entry_is_crc(en_l)) {
+ struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ if (crc_l.uncompressed_size + crc_r.uncompressed_size >
+ bch2_crc_field_size_max[extent_entry_type(en_l)])
+ return false;
+ }
+
+ en_l = extent_entry_next(en_l);
+ en_r = extent_entry_next(en_r);
+ }
+
+ use_right_ptr = false;
+ en_l = l_ptrs.start;
+ en_r = r_ptrs.start;
+ while (en_l < l_ptrs.end) {
+ if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
+ use_right_ptr)
+ en_l->ptr = en_r->ptr;
+
+ if (extent_entry_is_crc(en_l)) {
+ struct bch_extent_crc_unpacked crc_l =
+ bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
+ struct bch_extent_crc_unpacked crc_r =
+ bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));
+
+ use_right_ptr = false;
+
+ if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
+ crc_l.uncompressed_size) {
+ /* can use left extent's crc entry */
+ } else if (crc_l.live_size <= crc_r.offset) {
+ /* can use right extent's crc entry */
+ crc_r.offset -= crc_l.live_size;
+ bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
+ extent_entry_type(en_l));
+ use_right_ptr = true;
+ } else {
+ crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
+ crc_l.csum,
+ crc_r.csum,
+ crc_r.uncompressed_size << 9);
+
+ crc_l.uncompressed_size += crc_r.uncompressed_size;
+ crc_l.compressed_size += crc_r.compressed_size;
+ bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
+ extent_entry_type(en_l));
+ }
+ }
+
+ en_l = extent_entry_next(en_l);
+ en_r = extent_entry_next(en_r);
+ }
+
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+ return true;
+}
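+
+/*
+ * Editorial example: two adjacent 8-sector extents whose pointers land on
+ * contiguous sectors of the same device, bucket and generation merge into
+ * one 16-sector extent; their checksum entries must either still cover the
+ * merged data or be mergeable (bch2_checksum_mergeable(), e.g. crc32c).
+ */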
+
+/* KEY_TYPE_reservation: */
+
+int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+
+ if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
+ prt_printf(err, "invalid nr_replicas (%u)",
+ r.v->nr_replicas);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+
+ prt_printf(out, "generation %u replicas %u",
+ le32_to_cpu(r.v->generation),
+ r.v->nr_replicas);
+}
+
+bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
+{
+ struct bkey_s_reservation l = bkey_s_to_reservation(_l);
+ struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);
+
+ if (l.v->generation != r.v->generation ||
+ l.v->nr_replicas != r.v->nr_replicas)
+ return false;
+
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+ return true;
+}
+
+/* Extent checksum entries: */
+
+/* returns true if not equal */
+static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
+ struct bch_extent_crc_unpacked r)
+{
+ return (l.csum_type != r.csum_type ||
+ l.compression_type != r.compression_type ||
+ l.compressed_size != r.compressed_size ||
+ l.uncompressed_size != r.uncompressed_size ||
+ l.offset != r.offset ||
+ l.live_size != r.live_size ||
+ l.nonce != r.nonce ||
+ bch2_crc_cmp(l.csum, r.csum));
+}
+
+static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
+ struct bch_extent_crc_unpacked n)
+{
+ return !crc_is_compressed(u) &&
+ u.csum_type &&
+ u.uncompressed_size > u.live_size &&
+ bch2_csum_type_is_encryption(u.csum_type) ==
+ bch2_csum_type_is_encryption(n.csum_type);
+}
+
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
+ struct bch_extent_crc_unpacked n)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_extent_crc_unpacked crc;
+ const union bch_extent_entry *i;
+
+ if (!n.csum_type)
+ return false;
+
+ bkey_for_each_crc(k.k, ptrs, crc, i)
+ if (can_narrow_crc(crc, n))
+ return true;
+
+ return false;
+}
+
+/*
+ * We're writing another replica for this extent, so while we've got the data in
+ * memory we'll be computing a new checksum for the currently live data.
+ *
+ * If there are other replicas we aren't moving, and they are checksummed but
+ * not compressed, we can modify them to point to only the data that is
+ * currently live (so that readers won't have to bounce) while we've got the
+ * checksum we need:
+ */
+bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked u;
+ struct extent_ptr_decoded p;
+ union bch_extent_entry *i;
+ bool ret = false;
+
+ /* Find a checksum entry that covers only live data: */
+ if (!n.csum_type) {
+ bkey_for_each_crc(&k->k, ptrs, u, i)
+ if (!crc_is_compressed(u) &&
+ u.csum_type &&
+ u.live_size == u.uncompressed_size) {
+ n = u;
+ goto found;
+ }
+ return false;
+ }
+found:
+ BUG_ON(crc_is_compressed(n));
+ BUG_ON(n.offset);
+ BUG_ON(n.live_size != k->k.size);
+
+restart_narrow_pointers:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+
+ bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
+ if (can_narrow_crc(p.crc, n)) {
+ bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
+ p.ptr.offset += p.crc.offset;
+ p.crc = n;
+ bch2_extent_ptr_decoded_append(k, &p);
+ ret = true;
+ goto restart_narrow_pointers;
+ }
+
+ return ret;
+}
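+
+/*
+ * Editorial example: a 128-sector checksummed, uncompressed extent that has
+ * been trimmed down to 8 live sectors normally forces readers to read and
+ * checksum all 128 sectors, then bounce out the live part. If a rewrite
+ * just computed a checksum over exactly the live 8 sectors, narrowing
+ * repoints the untouched replicas at only that range, so future reads are
+ * 8 sectors with no bounce.
+ */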
+
+static void bch2_extent_crc_pack(union bch_extent_crc *dst,
+ struct bch_extent_crc_unpacked src,
+ enum bch_extent_entry_type type)
+{
+#define set_common_fields(_dst, _src) \
+ _dst.type = 1 << type; \
+ _dst.csum_type = _src.csum_type, \
+ _dst.compression_type = _src.compression_type, \
+ _dst._compressed_size = _src.compressed_size - 1, \
+ _dst._uncompressed_size = _src.uncompressed_size - 1, \
+ _dst.offset = _src.offset
+
+ switch (type) {
+ case BCH_EXTENT_ENTRY_crc32:
+ set_common_fields(dst->crc32, src);
+ dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ set_common_fields(dst->crc64, src);
+ dst->crc64.nonce = src.nonce;
+ dst->crc64.csum_lo = (u64 __force) src.csum.lo;
+ dst->crc64.csum_hi = (u64 __force) *((__le16 *) &src.csum.hi);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ set_common_fields(dst->crc128, src);
+ dst->crc128.nonce = src.nonce;
+ dst->crc128.csum = src.csum;
+ break;
+ default:
+ BUG();
+ }
+#undef set_common_fields
+}
+
+void bch2_extent_crc_append(struct bkey_i *k,
+ struct bch_extent_crc_unpacked new)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ union bch_extent_crc *crc = (void *) ptrs.end;
+ enum bch_extent_entry_type type;
+
+ if (bch_crc_bytes[new.csum_type] <= 4 &&
+ new.uncompressed_size <= CRC32_SIZE_MAX &&
+ new.nonce <= CRC32_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc32;
+ else if (bch_crc_bytes[new.csum_type] <= 10 &&
+ new.uncompressed_size <= CRC64_SIZE_MAX &&
+ new.nonce <= CRC64_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc64;
+ else if (bch_crc_bytes[new.csum_type] <= 16 &&
+ new.uncompressed_size <= CRC128_SIZE_MAX &&
+ new.nonce <= CRC128_NONCE_MAX)
+ type = BCH_EXTENT_ENTRY_crc128;
+ else
+ BUG();
+
+ bch2_extent_crc_pack(crc, new, type);
+
+ k->k.u64s += extent_entry_u64s(ptrs.end);
+
+ EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
+}
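+
+/*
+ * Editorial note: the packed variant chosen above is the smallest that
+ * fits - a 4-byte checksum over a small, nonce-less extent packs into
+ * bch_extent_crc32; bigger checksums, sizes or nonces fall through to
+ * crc64 and then crc128.
+ */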
+
+/* Generic code for keys with pointers: */
+
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
+{
+ return bch2_bkey_devs(k).nr;
+}
+
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
+{
+ return k.k->type == KEY_TYPE_reservation
+ ? bkey_s_c_to_reservation(k).v->nr_replicas
+ : bch2_bkey_dirty_devs(k).nr;
+}
+
+unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
+{
+ unsigned ret = 0;
+
+ if (k.k->type == KEY_TYPE_reservation) {
+ ret = bkey_s_c_to_reservation(k).v->nr_replicas;
+ } else {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ ret += !p.ptr.cached && !crc_is_compressed(p.crc);
+ }
+
+ return ret;
+}
+
+unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned ret = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (!p.ptr.cached && crc_is_compressed(p.crc))
+ ret += p.crc.compressed_size;
+
+ return ret;
+}
+
+bool bch2_bkey_is_incompressible(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
+
+ bkey_for_each_crc(k.k, ptrs, crc, entry)
+ if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
+ return true;
+ return false;
+}
+
+unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p = { 0 };
+ unsigned replicas = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if (p.ptr.cached)
+ continue;
+
+ if (p.has_ec)
+ replicas += p.ec.redundancy;
+
+		replicas++;
+	}
+
+ return replicas;
+}
+
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+{
+ struct bch_dev *ca;
+
+ if (p->ptr.cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, p->ptr.dev);
+
+ return ca->mi.durability +
+ (p->has_ec
+ ? p->ec.redundancy
+ : 0);
+}
+
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+{
+ struct bch_dev *ca;
+
+ if (p->ptr.cached)
+ return 0;
+
+ ca = bch_dev_bkey_exists(c, p->ptr.dev);
+
+ if (ca->mi.state == BCH_MEMBER_STATE_failed)
+ return 0;
+
+ return ca->mi.durability +
+ (p->has_ec
+ ? p->ec.redundancy
+ : 0);
+}
+
+unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned durability = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ durability += bch2_extent_ptr_durability(c, &p);
+
+ return durability;
+}
+
+static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned durability = 0;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
+ durability += bch2_extent_ptr_durability(c, &p);
+
+ return durability;
+}
+
+void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
+{
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
+ union bch_extent_entry *next = extent_entry_next(entry);
+
+ memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
+ k->k.u64s -= extent_entry_u64s(entry);
+}
+
+void bch2_extent_ptr_decoded_append(struct bkey_i *k,
+ struct extent_ptr_decoded *p)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
+ struct bch_extent_crc_unpacked crc =
+ bch2_extent_crc_unpack(&k->k, NULL);
+ union bch_extent_entry *pos;
+
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = ptrs.start;
+ goto found;
+ }
+
+ bkey_for_each_crc(&k->k, ptrs, crc, pos)
+ if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
+ pos = extent_entry_next(pos);
+ goto found;
+ }
+
+ bch2_extent_crc_append(k, p->crc);
+ pos = bkey_val_end(bkey_i_to_s(k));
+found:
+ p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ptr));
+
+ if (p->has_ec) {
+ p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+ __extent_entry_insert(k, pos, to_entry(&p->ec));
+ }
+}
+
+static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
+ union bch_extent_entry *entry)
+{
+ union bch_extent_entry *i = ptrs.start;
+
+ if (i == entry)
+ return NULL;
+
+ while (extent_entry_next(i) != entry)
+ i = extent_entry_next(i);
+ return i;
+}
+
+static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
+{
+ union bch_extent_entry *next = extent_entry_next(entry);
+
+ /* stripes have ptrs, but their layout doesn't work with this code */
+ BUG_ON(k.k->type == KEY_TYPE_stripe);
+
+ memmove_u64s_down(entry, next,
+ (u64 *) bkey_val_end(k) - (u64 *) next);
+ k.k->u64s -= (u64 *) next - (u64 *) entry;
+}
+
+/*
+ * Returns pointer to the next entry after the one being dropped:
+ */
+union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
+ struct bch_extent_ptr *ptr)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry = to_entry(ptr), *next;
+ union bch_extent_entry *ret = entry;
+ bool drop_crc = true;
+
+ EBUG_ON(ptr < &ptrs.start->ptr ||
+ ptr >= &ptrs.end->ptr);
+ EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);
+
+ for (next = extent_entry_next(entry);
+ next != ptrs.end;
+ next = extent_entry_next(next)) {
+ if (extent_entry_is_crc(next)) {
+ break;
+ } else if (extent_entry_is_ptr(next)) {
+ drop_crc = false;
+ break;
+ }
+ }
+
+ extent_entry_drop(k, entry);
+
+ while ((entry = extent_entry_prev(ptrs, entry))) {
+ if (extent_entry_is_ptr(entry))
+ break;
+
+ if ((extent_entry_is_crc(entry) && drop_crc) ||
+ extent_entry_is_stripe_ptr(entry)) {
+ ret = (void *) ret - extent_entry_bytes(entry);
+ extent_entry_drop(k, entry);
+ }
+ }
+
+ return ret;
+}
+
+union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
+ struct bch_extent_ptr *ptr)
+{
+ bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
+ union bch_extent_entry *ret =
+ bch2_bkey_drop_ptr_noerror(k, ptr);
+
+ /*
+ * If we deleted all the dirty pointers and there's still cached
+ * pointers, we could set the cached pointers to dirty if they're not
+ * stale - but to do that correctly we'd need to grab an open_bucket
+ * reference so that we don't race with bucket reuse:
+ */
+ if (have_dirty &&
+ !bch2_bkey_dirty_devs(k.s_c).nr) {
+ k.k->type = KEY_TYPE_error;
+ set_bkey_val_u64s(k.k, 0);
+ ret = NULL;
+ } else if (!bch2_bkey_nr_ptrs(k.s_c)) {
+ k.k->type = KEY_TYPE_deleted;
+ set_bkey_val_u64s(k.k, 0);
+ ret = NULL;
+ }
+
+ return ret;
+}
+
+void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
+{
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
+}
+
+void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
+{
+ struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);
+
+ if (ptr)
+ bch2_bkey_drop_ptr_noerror(k, ptr);
+}
+
+const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->dev == dev)
+ return ptr;
+
+ return NULL;
+}
+
+bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ if (bch2_dev_in_target(c, ptr->dev, target) &&
+ (!ptr->cached ||
+ !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
+ return true;
+
+ return false;
+}
+
+bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_extent_ptr m, u64 offset)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ if (p.ptr.dev == m.dev &&
+ p.ptr.gen == m.gen &&
+ (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
+ (s64) m.offset - offset)
+ return true;
+
+ return false;
+}
+
+/*
+ * Returns true if two extents refer to the same data:
+ */
+bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
+{
+ if (k1.k->type != k2.k->type)
+ return false;
+
+ if (bkey_extent_is_direct_data(k1.k)) {
+ struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
+ struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
+ const union bch_extent_entry *entry1, *entry2;
+ struct extent_ptr_decoded p1, p2;
+
+ if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
+ return false;
+
+ bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
+ bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
+ if (p1.ptr.dev == p2.ptr.dev &&
+ p1.ptr.gen == p2.ptr.gen &&
+ (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
+ (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
+ return true;
+
+ return false;
+ } else {
+ /* KEY_TYPE_deleted, etc. */
+ return true;
+ }
+}
+
+struct bch_extent_ptr *
+bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
+{
+ struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
+ union bch_extent_entry *entry2;
+ struct extent_ptr_decoded p2;
+
+ bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
+ if (p1.ptr.dev == p2.ptr.dev &&
+ p1.ptr.gen == p2.ptr.gen &&
+ (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
+ (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
+ return &entry2->ptr;
+
+ return NULL;
+}
+
+void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ union bch_extent_entry *ec = NULL;
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (&entry->ptr == ptr) {
+ ptr->cached = true;
+ if (ec)
+ extent_entry_drop(k, ec);
+ return;
+ }
+
+ if (extent_entry_is_stripe_ptr(entry))
+ ec = entry;
+ else if (extent_entry_is_ptr(entry))
+ ec = NULL;
+ }
+
+ BUG();
+}
+
+/*
+ * bch2_extent_normalize - clean up an extent, dropping stale pointers etc.
+ *
+ * Returns true if @k should be dropped entirely
+ *
+ * For existing keys, only called when btree nodes are being rewritten, not when
+ * they're merely being compacted/resorted in memory.
+ */
+bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
+{
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(k, ptr,
+ ptr->cached &&
+ ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
+
+ return bkey_deleted(k.k);
+}
+
+void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
+ const struct bch_extent_ptr *ptr;
+ const struct bch_extent_stripe_ptr *ec;
+ struct bch_dev *ca;
+ bool first = true;
+
+ if (c)
+ prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (!first)
+ prt_printf(out, " ");
+
+ switch (__extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ ptr = entry_to_ptr(entry);
+ ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
+ ? bch_dev_bkey_exists(c, ptr->dev)
+ : NULL;
+
+ if (!ca) {
+ prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
+ (u64) ptr->offset, ptr->gen,
+ ptr->cached ? " cached" : "");
+ } else {
+ u32 offset;
+ u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+
+ prt_printf(out, "ptr: %u:%llu:%u gen %u",
+ ptr->dev, b, offset, ptr->gen);
+ if (ptr->cached)
+ prt_str(out, " cached");
+ if (ptr->unwritten)
+ prt_str(out, " unwritten");
+ if (ca && ptr_stale(ca, ptr))
+ prt_printf(out, " stale");
+ }
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
+
+ prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
+ crc.compressed_size,
+ crc.uncompressed_size,
+ crc.offset, crc.nonce,
+ bch2_csum_types[crc.csum_type],
+ bch2_compression_types[crc.compression_type]);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ ec = &entry->stripe_ptr;
+
+ prt_printf(out, "ec: idx %llu block %u",
+ (u64) ec->idx, ec->block);
+ break;
+ default:
+ prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
+ return;
+ }
+
+ first = false;
+ }
+}
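+
+/*
+ * Editorial example of the output (field values made up):
+ *
+ *	durability: 1 ptr: 0:302:0 gen 4 crc: c_size 8 size 8 offset 0
+ *	nonce 0 csum crc32c compress none
+ */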
+
+static int extent_ptr_invalid(const struct bch_fs *c,
+ struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ const struct bch_extent_ptr *ptr,
+ unsigned size_ondisk,
+ bool metadata,
+ struct printbuf *err)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr2;
+ u64 bucket;
+ u32 bucket_offset;
+ struct bch_dev *ca;
+
+ if (!bch2_dev_exists2(c, ptr->dev)) {
+ /*
+ * If we're in the write path this key might have already been
+ * overwritten, and we could be seeing a device that doesn't
+ * exist anymore due to racing with device removal:
+ */
+ if (flags & BKEY_INVALID_WRITE)
+ return 0;
+
+ prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+ bkey_for_each_ptr(ptrs, ptr2)
+ if (ptr != ptr2 && ptr->dev == ptr2->dev) {
+ prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
+
+ if (bucket >= ca->mi.nbuckets) {
+ prt_printf(err, "pointer past last bucket (%llu > %llu)",
+ bucket, ca->mi.nbuckets);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
+ prt_printf(err, "pointer before first bucket (%llu < %u)",
+ bucket, ca->mi.first_bucket);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
+ prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
+ bucket_offset, size_ondisk, ca->mi.bucket_size);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct bch_extent_crc_unpacked crc;
+ unsigned size_ondisk = k.k->size;
+ unsigned nonce = UINT_MAX;
+ unsigned nr_ptrs = 0;
+ bool unwritten = false, have_ec = false, crc_since_last_ptr = false;
+ int ret;
+
+ if (bkey_is_btree_ptr(k.k))
+ size_ondisk = btree_sectors(c);
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
+ prt_printf(err, "invalid extent entry type (got %u, max %u)",
+ __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bkey_is_btree_ptr(k.k) &&
+ !extent_entry_is_ptr(entry)) {
+ prt_printf(err, "has non ptr field");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
+ size_ondisk, false, err);
+ if (ret)
+ return ret;
+
+ if (nr_ptrs && unwritten != entry->ptr.unwritten) {
+ prt_printf(err, "extent with unwritten and written ptrs");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
+ prt_printf(err, "has unwritten ptrs");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (entry->ptr.cached && have_ec) {
+ prt_printf(err, "cached, erasure coded ptr");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ unwritten = entry->ptr.unwritten;
+ have_ec = false;
+ crc_since_last_ptr = false;
+ nr_ptrs++;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
+
+ if (crc.offset + crc.live_size >
+ crc.uncompressed_size) {
+ prt_printf(err, "checksum offset + key size > uncompressed size");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ size_ondisk = crc.compressed_size;
+
+ if (!bch2_checksum_type_valid(c, crc.csum_type)) {
+ prt_printf(err, "invalid checksum type");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
+ prt_printf(err, "invalid compression type");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bch2_csum_type_is_encryption(crc.csum_type)) {
+ if (nonce == UINT_MAX)
+ nonce = crc.offset + crc.nonce;
+ else if (nonce != crc.offset + crc.nonce) {
+ prt_printf(err, "incorrect nonce");
+ return -BCH_ERR_invalid_bkey;
+ }
+ }
+
+ if (crc_since_last_ptr) {
+ prt_printf(err, "redundant crc entry");
+ return -BCH_ERR_invalid_bkey;
+ }
+ crc_since_last_ptr = true;
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ if (have_ec) {
+ prt_printf(err, "redundant stripe entry");
+ return -BCH_ERR_invalid_bkey;
+ }
+ have_ec = true;
+ break;
+ case BCH_EXTENT_ENTRY_rebalance:
+ break;
+ }
+ }
+
+ if (!nr_ptrs) {
+ prt_str(err, "no ptrs");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
+ prt_str(err, "too many ptrs");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (crc_since_last_ptr) {
+ prt_printf(err, "redundant crc entry");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (have_ec) {
+ prt_printf(err, "redundant stripe entry");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_ptr_swab(struct bkey_s k)
+{
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ u64 *d;
+
+ for (d = (u64 *) ptrs.start;
+ d != (u64 *) ptrs.end;
+ d++)
+ *d = swab64(*d);
+
+ for (entry = ptrs.start;
+ entry < ptrs.end;
+ entry = extent_entry_next(entry)) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.csum = swab32(entry->crc32.csum);
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
+ entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.csum.hi = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.hi);
+ entry->crc128.csum.lo = (__force __le64)
+ swab64((__force u64) entry->crc128.csum.lo);
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
+ case BCH_EXTENT_ENTRY_rebalance:
+ break;
+ }
+ }
+}
+
+/* Generic extent code: */
+
+int bch2_cut_front_s(struct bpos where, struct bkey_s k)
+{
+ unsigned new_val_u64s = bkey_val_u64s(k.k);
+ int val_u64s_delta;
+ u64 sub;
+
+ if (bkey_le(where, bkey_start_pos(k.k)))
+ return 0;
+
+ EBUG_ON(bkey_gt(where, k.k->p));
+
+ sub = where.offset - bkey_start_offset(k.k);
+
+ k.k->size -= sub;
+
+ if (!k.k->size) {
+ k.k->type = KEY_TYPE_deleted;
+ new_val_u64s = 0;
+ }
+
+ switch (k.k->type) {
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v: {
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
+ union bch_extent_entry *entry;
+ bool seen_crc = false;
+
+ bkey_extent_entry_for_each(ptrs, entry) {
+ switch (extent_entry_type(entry)) {
+ case BCH_EXTENT_ENTRY_ptr:
+ if (!seen_crc)
+ entry->ptr.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc32:
+ entry->crc32.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc64:
+ entry->crc64.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_crc128:
+ entry->crc128.offset += sub;
+ break;
+ case BCH_EXTENT_ENTRY_stripe_ptr:
+ break;
+ case BCH_EXTENT_ENTRY_rebalance:
+ break;
+ }
+
+ if (extent_entry_is_crc(entry))
+ seen_crc = true;
+ }
+
+ break;
+ }
+ case KEY_TYPE_reflink_p: {
+ struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);
+
+ le64_add_cpu(&p.v->idx, sub);
+ break;
+ }
+ case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data: {
+ void *p = bkey_inline_data_p(k);
+ unsigned bytes = bkey_inline_data_bytes(k.k);
+
+ sub = min_t(u64, sub << 9, bytes);
+
+ memmove(p, p + sub, bytes - sub);
+
+ new_val_u64s -= sub >> 3;
+ break;
+ }
+ }
+
+ val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
+ BUG_ON(val_u64s_delta < 0);
+
+ set_bkey_val_u64s(k.k, new_val_u64s);
+ memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
+ return -val_u64s_delta;
+}
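+
+/*
+ * Worked example (editorial): cutting the front of a 16-sector extent at
+ * start + 4 shrinks k->size to 12 and advances each bare pointer - or the
+ * checksum entry's offset, when the data is checksummed - by 4 sectors;
+ * inline data is memmoved down instead. The return value is minus the
+ * change in value u64s, so callers can see how much the key shrank.
+ */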
+
+int bch2_cut_back_s(struct bpos where, struct bkey_s k)
+{
+ unsigned new_val_u64s = bkey_val_u64s(k.k);
+ int val_u64s_delta;
+ u64 len = 0;
+
+ if (bkey_ge(where, k.k->p))
+ return 0;
+
+ EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
+
+ len = where.offset - bkey_start_offset(k.k);
+
+ k.k->p.offset = where.offset;
+ k.k->size = len;
+
+ if (!len) {
+ k.k->type = KEY_TYPE_deleted;
+ new_val_u64s = 0;
+ }
+
+ switch (k.k->type) {
+ case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data:
+ new_val_u64s = (bkey_inline_data_offset(k.k) +
+ min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
+ break;
+ }
+
+ val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
+ BUG_ON(val_u64s_delta < 0);
+
+ set_bkey_val_u64s(k.k, new_val_u64s);
+ memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
+ return -val_u64s_delta;
+}
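+
+/*
+ * Editorial note: unlike cutting the front, cutting the back doesn't touch
+ * pointer or checksum offsets, since the data still starts in the same
+ * place; only inline data keys need their value size recomputed.
+ */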
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
new file mode 100644
index 000000000000..879e7d218b6a
--- /dev/null
+++ b/fs/bcachefs/extents.h
@@ -0,0 +1,758 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EXTENTS_H
+#define _BCACHEFS_EXTENTS_H
+
+#include "bcachefs.h"
+#include "bkey.h"
+#include "extents_types.h"
+
+struct bch_fs;
+struct btree_trans;
+enum bkey_invalid_flags;
+
+/* extent entries: */
+
+#define extent_entry_last(_e) \
+ ((typeof(&(_e).v->start[0])) bkey_val_end(_e))
+
+#define entry_to_ptr(_entry) \
+({ \
+ EBUG_ON((_entry) && !extent_entry_is_ptr(_entry)); \
+ \
+ __builtin_choose_expr( \
+ type_is_exact(_entry, const union bch_extent_entry *), \
+ (const struct bch_extent_ptr *) (_entry), \
+ (struct bch_extent_ptr *) (_entry)); \
+})
+
+/* downcast, preserves const */
+#define to_entry(_entry) \
+({ \
+ BUILD_BUG_ON(!type_is(_entry, union bch_extent_crc *) && \
+ !type_is(_entry, struct bch_extent_ptr *) && \
+ !type_is(_entry, struct bch_extent_stripe_ptr *)); \
+ \
+ __builtin_choose_expr( \
+ (type_is_exact(_entry, const union bch_extent_crc *) || \
+ type_is_exact(_entry, const struct bch_extent_ptr *) ||\
+ type_is_exact(_entry, const struct bch_extent_stripe_ptr *)),\
+ (const union bch_extent_entry *) (_entry), \
+ (union bch_extent_entry *) (_entry)); \
+})
+
+#define extent_entry_next(_entry) \
+ ((typeof(_entry)) ((void *) (_entry) + extent_entry_bytes(_entry)))
+
+static inline unsigned
+__extent_entry_type(const union bch_extent_entry *e)
+{
+ return e->type ? __ffs(e->type) : BCH_EXTENT_ENTRY_MAX;
+}
+
+static inline enum bch_extent_entry_type
+extent_entry_type(const union bch_extent_entry *e)
+{
+ int ret = __ffs(e->type);
+
+ EBUG_ON(ret < 0 || ret >= BCH_EXTENT_ENTRY_MAX);
+
+ return ret;
+}
+
+static inline size_t extent_entry_bytes(const union bch_extent_entry *entry)
+{
+ switch (extent_entry_type(entry)) {
+#define x(f, n) \
+ case BCH_EXTENT_ENTRY_##f: \
+ return sizeof(struct bch_extent_##f);
+ BCH_EXTENT_ENTRY_TYPES()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+static inline size_t extent_entry_u64s(const union bch_extent_entry *entry)
+{
+ return extent_entry_bytes(entry) / sizeof(u64);
+}
+
+static inline void __extent_entry_insert(struct bkey_i *k,
+ union bch_extent_entry *dst,
+ union bch_extent_entry *new)
+{
+ union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
+
+ memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
+ dst, (u64 *) end - (u64 *) dst);
+ k->k.u64s += extent_entry_u64s(new);
+ memcpy_u64s_small(dst, new, extent_entry_u64s(new));
+}
+
+static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
+{
+ return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
+}
+
+static inline bool extent_entry_is_stripe_ptr(const union bch_extent_entry *e)
+{
+ return extent_entry_type(e) == BCH_EXTENT_ENTRY_stripe_ptr;
+}
+
+static inline bool extent_entry_is_crc(const union bch_extent_entry *e)
+{
+ switch (extent_entry_type(e)) {
+ case BCH_EXTENT_ENTRY_crc32:
+ case BCH_EXTENT_ENTRY_crc64:
+ case BCH_EXTENT_ENTRY_crc128:
+ return true;
+ default:
+ return false;
+ }
+}
+
+union bch_extent_crc {
+ u8 type;
+ struct bch_extent_crc32 crc32;
+ struct bch_extent_crc64 crc64;
+ struct bch_extent_crc128 crc128;
+};
+
+#define __entry_to_crc(_entry) \
+ __builtin_choose_expr( \
+ type_is_exact(_entry, const union bch_extent_entry *), \
+ (const union bch_extent_crc *) (_entry), \
+ (union bch_extent_crc *) (_entry))
+
+#define entry_to_crc(_entry) \
+({ \
+ EBUG_ON((_entry) && !extent_entry_is_crc(_entry)); \
+ \
+ __entry_to_crc(_entry); \
+})
+
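+/*
+ * Illustrative note (not in the original patch): on disk, crc entries store
+ * compressed_size and uncompressed_size biased by one, which is why the
+ * unpack helper below adds 1 to each when building the unpacked form.
+ */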
+static inline struct bch_extent_crc_unpacked
+bch2_extent_crc_unpack(const struct bkey *k, const union bch_extent_crc *crc)
+{
+#define common_fields(_crc) \
+ .csum_type = _crc.csum_type, \
+ .compression_type = _crc.compression_type, \
+ .compressed_size = _crc._compressed_size + 1, \
+ .uncompressed_size = _crc._uncompressed_size + 1, \
+ .offset = _crc.offset, \
+ .live_size = k->size
+
+ if (!crc)
+ return (struct bch_extent_crc_unpacked) {
+ .compressed_size = k->size,
+ .uncompressed_size = k->size,
+ .live_size = k->size,
+ };
+
+ switch (extent_entry_type(to_entry(crc))) {
+ case BCH_EXTENT_ENTRY_crc32: {
+ struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
+ common_fields(crc->crc32),
+ };
+
+ *((__le32 *) &ret.csum.lo) = (__le32 __force) crc->crc32.csum;
+ return ret;
+ }
+ case BCH_EXTENT_ENTRY_crc64: {
+ struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
+ common_fields(crc->crc64),
+ .nonce = crc->crc64.nonce,
+ .csum.lo = (__force __le64) crc->crc64.csum_lo,
+ };
+
+ *((__le16 *) &ret.csum.hi) = (__le16 __force) crc->crc64.csum_hi;
+
+ return ret;
+ }
+ case BCH_EXTENT_ENTRY_crc128: {
+ struct bch_extent_crc_unpacked ret = (struct bch_extent_crc_unpacked) {
+ common_fields(crc->crc128),
+ .nonce = crc->crc128.nonce,
+ .csum = crc->crc128.csum,
+ };
+
+ return ret;
+ }
+ default:
+ BUG();
+ }
+#undef common_fields
+}
+
+static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
+{
+ return (crc.compression_type != BCH_COMPRESSION_TYPE_none &&
+ crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
+}
+
+/* bkey_ptrs: generically over any key type that has ptrs */
+
+struct bkey_ptrs_c {
+ const union bch_extent_entry *start;
+ const union bch_extent_entry *end;
+};
+
+struct bkey_ptrs {
+ union bch_extent_entry *start;
+ union bch_extent_entry *end;
+};
+
+static inline struct bkey_ptrs_c bch2_bkey_ptrs_c(struct bkey_s_c k)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr: {
+ struct bkey_s_c_btree_ptr e = bkey_s_c_to_btree_ptr(k);
+
+ return (struct bkey_ptrs_c) {
+ to_entry(&e.v->start[0]),
+ to_entry(extent_entry_last(e))
+ };
+ }
+ case KEY_TYPE_extent: {
+ struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+
+ return (struct bkey_ptrs_c) {
+ e.v->start,
+ extent_entry_last(e)
+ };
+ }
+ case KEY_TYPE_stripe: {
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
+ return (struct bkey_ptrs_c) {
+ to_entry(&s.v->ptrs[0]),
+ to_entry(&s.v->ptrs[s.v->nr_blocks]),
+ };
+ }
+ case KEY_TYPE_reflink_v: {
+ struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
+
+ return (struct bkey_ptrs_c) {
+ r.v->start,
+ bkey_val_end(r),
+ };
+ }
+ case KEY_TYPE_btree_ptr_v2: {
+ struct bkey_s_c_btree_ptr_v2 e = bkey_s_c_to_btree_ptr_v2(k);
+
+ return (struct bkey_ptrs_c) {
+ to_entry(&e.v->start[0]),
+ to_entry(extent_entry_last(e))
+ };
+ }
+ default:
+ return (struct bkey_ptrs_c) { NULL, NULL };
+ }
+}
+
+static inline struct bkey_ptrs bch2_bkey_ptrs(struct bkey_s k)
+{
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k.s_c);
+
+ return (struct bkey_ptrs) {
+ (void *) p.start,
+ (void *) p.end
+ };
+}
+
+#define __bkey_extent_entry_for_each_from(_start, _end, _entry) \
+ for ((_entry) = (_start); \
+ (_entry) < (_end); \
+ (_entry) = extent_entry_next(_entry))
+
+#define __bkey_ptr_next(_ptr, _end) \
+({ \
+ typeof(_end) _entry; \
+ \
+ __bkey_extent_entry_for_each_from(to_entry(_ptr), _end, _entry) \
+ if (extent_entry_is_ptr(_entry)) \
+ break; \
+ \
+ _entry < (_end) ? entry_to_ptr(_entry) : NULL; \
+})
+
+#define bkey_extent_entry_for_each_from(_p, _entry, _start) \
+ __bkey_extent_entry_for_each_from(_start, (_p).end, _entry)
+
+#define bkey_extent_entry_for_each(_p, _entry) \
+ bkey_extent_entry_for_each_from(_p, _entry, _p.start)
+
+#define __bkey_for_each_ptr(_start, _end, _ptr) \
+ for ((_ptr) = (_start); \
+ ((_ptr) = __bkey_ptr_next(_ptr, _end)); \
+ (_ptr)++)
+
+#define bkey_ptr_next(_p, _ptr) \
+ __bkey_ptr_next(_ptr, (_p).end)
+
+#define bkey_for_each_ptr(_p, _ptr) \
+ __bkey_for_each_ptr(&(_p).start->ptr, (_p).end, _ptr)
+
+#define __bkey_ptr_next_decode(_k, _end, _ptr, _entry) \
+({ \
+ __label__ out; \
+ \
+ (_ptr).idx = 0; \
+ (_ptr).has_ec = false; \
+ \
+ __bkey_extent_entry_for_each_from(_entry, _end, _entry) \
+ switch (extent_entry_type(_entry)) { \
+ case BCH_EXTENT_ENTRY_ptr: \
+ (_ptr).ptr = _entry->ptr; \
+ goto out; \
+ case BCH_EXTENT_ENTRY_crc32: \
+ case BCH_EXTENT_ENTRY_crc64: \
+ case BCH_EXTENT_ENTRY_crc128: \
+ (_ptr).crc = bch2_extent_crc_unpack(_k, \
+ entry_to_crc(_entry)); \
+ break; \
+ case BCH_EXTENT_ENTRY_stripe_ptr: \
+ (_ptr).ec = _entry->stripe_ptr; \
+ (_ptr).has_ec = true; \
+ break; \
+ default: \
+ /* nothing */ \
+ break; \
+ } \
+out: \
+ _entry < (_end); \
+})
+
+#define __bkey_for_each_ptr_decode(_k, _start, _end, _ptr, _entry) \
+ for ((_ptr).crc = bch2_extent_crc_unpack(_k, NULL), \
+ (_entry) = _start; \
+ __bkey_ptr_next_decode(_k, _end, _ptr, _entry); \
+ (_entry) = extent_entry_next(_entry))
+
+#define bkey_for_each_ptr_decode(_k, _p, _ptr, _entry) \
+ __bkey_for_each_ptr_decode(_k, (_p).start, (_p).end, \
+ _ptr, _entry)
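+
+/*
+ * Illustrative usage sketch (not in the original patch): walk every pointer
+ * in a key together with its decoded checksum/compression state:
+ *
+ *	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ *	const union bch_extent_entry *entry;
+ *	struct extent_ptr_decoded p;
+ *
+ *	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ *		if (p.crc.compression_type)
+ *			pr_info("compressed ptr on dev %u\n", p.ptr.dev);
+ */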
+
+#define bkey_crc_next(_k, _start, _end, _crc, _iter) \
+({ \
+ __bkey_extent_entry_for_each_from(_iter, _end, _iter) \
+ if (extent_entry_is_crc(_iter)) { \
+ (_crc) = bch2_extent_crc_unpack(_k, \
+ entry_to_crc(_iter)); \
+ break; \
+ } \
+ \
+ (_iter) < (_end); \
+})
+
+#define __bkey_for_each_crc(_k, _start, _end, _crc, _iter) \
+ for ((_crc) = bch2_extent_crc_unpack(_k, NULL), \
+ (_iter) = (_start); \
+ bkey_crc_next(_k, _start, _end, _crc, _iter); \
+ (_iter) = extent_entry_next(_iter))
+
+#define bkey_for_each_crc(_k, _p, _crc, _iter) \
+ __bkey_for_each_crc(_k, (_p).start, (_p).end, _crc, _iter)
+
+/* Iterate over pointers in KEY_TYPE_extent: */
+
+#define extent_for_each_entry_from(_e, _entry, _start) \
+ __bkey_extent_entry_for_each_from(_start, \
+ extent_entry_last(_e), _entry)
+
+#define extent_for_each_entry(_e, _entry) \
+ extent_for_each_entry_from(_e, _entry, (_e).v->start)
+
+#define extent_ptr_next(_e, _ptr) \
+ __bkey_ptr_next(_ptr, extent_entry_last(_e))
+
+#define extent_for_each_ptr(_e, _ptr) \
+ __bkey_for_each_ptr(&(_e).v->start->ptr, extent_entry_last(_e), _ptr)
+
+#define extent_for_each_ptr_decode(_e, _ptr, _entry) \
+ __bkey_for_each_ptr_decode((_e).k, (_e).v->start, \
+ extent_entry_last(_e), _ptr, _entry)
+
+/* utility code common to all keys with pointers: */
+
+void bch2_mark_io_failure(struct bch_io_failures *,
+ struct extent_ptr_decoded *);
+int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
+ struct bch_io_failures *,
+ struct extent_ptr_decoded *);
+
+/* KEY_TYPE_btree_ptr: */
+
+int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+
+int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
+ int, struct bkey_s);
+
+#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) { \
+ .key_invalid = bch2_btree_ptr_invalid, \
+ .val_to_text = bch2_btree_ptr_to_text, \
+ .swab = bch2_ptr_swab, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
+})
+
+#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) { \
+ .key_invalid = bch2_btree_ptr_v2_invalid, \
+ .val_to_text = bch2_btree_ptr_v2_to_text, \
+ .swab = bch2_ptr_swab, \
+ .compat = bch2_btree_ptr_v2_compat, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
+ .min_val_size = 40, \
+})
+
+/* KEY_TYPE_extent: */
+
+bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+
+#define bch2_bkey_ops_extent ((struct bkey_ops) { \
+ .key_invalid = bch2_bkey_ptrs_invalid, \
+ .val_to_text = bch2_bkey_ptrs_to_text, \
+ .swab = bch2_ptr_swab, \
+ .key_normalize = bch2_extent_normalize, \
+ .key_merge = bch2_extent_merge, \
+ .trans_trigger = bch2_trans_mark_extent, \
+ .atomic_trigger = bch2_mark_extent, \
+})
+
+/* KEY_TYPE_reservation: */
+
+int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+
+#define bch2_bkey_ops_reservation ((struct bkey_ops) { \
+ .key_invalid = bch2_reservation_invalid, \
+ .val_to_text = bch2_reservation_to_text, \
+ .key_merge = bch2_reservation_merge, \
+ .trans_trigger = bch2_trans_mark_reservation, \
+ .atomic_trigger = bch2_mark_reservation, \
+ .min_val_size = 8, \
+})
+
+/* Extent checksum entries: */
+
+bool bch2_can_narrow_extent_crcs(struct bkey_s_c,
+ struct bch_extent_crc_unpacked);
+bool bch2_bkey_narrow_crcs(struct bkey_i *, struct bch_extent_crc_unpacked);
+void bch2_extent_crc_append(struct bkey_i *,
+ struct bch_extent_crc_unpacked);
+
+/* Generic code for keys with pointers: */
+
+static inline bool bkey_is_btree_ptr(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool bkey_extent_is_direct_data(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool bkey_extent_is_inline_data(const struct bkey *k)
+{
+ return k->type == KEY_TYPE_inline_data ||
+ k->type == KEY_TYPE_indirect_inline_data;
+}
+
+static inline unsigned bkey_inline_data_offset(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_inline_data:
+ return sizeof(struct bch_inline_data);
+ case KEY_TYPE_indirect_inline_data:
+ return sizeof(struct bch_indirect_inline_data);
+ default:
+ BUG();
+ }
+}
+
+static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
+{
+ return bkey_val_bytes(k) - bkey_inline_data_offset(k);
+}
+
+#define bkey_inline_data_p(_k) (((void *) (_k).v) + bkey_inline_data_offset((_k).k))
+
+static inline bool bkey_extent_is_data(const struct bkey *k)
+{
+ return bkey_extent_is_direct_data(k) ||
+ bkey_extent_is_inline_data(k) ||
+ k->type == KEY_TYPE_reflink_p;
+}
+
+/*
+ * Should extent be counted under inode->i_sectors?
+ */
+static inline bool bkey_extent_is_allocation(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reservation:
+ case KEY_TYPE_reflink_p:
+ case KEY_TYPE_reflink_v:
+ case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data:
+ case KEY_TYPE_error:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool bkey_extent_is_unwritten(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(ptrs, ptr)
+ if (ptr->unwritten)
+ return true;
+ return false;
+}
+
+static inline bool bkey_extent_is_reservation(struct bkey_s_c k)
+{
+ return k.k->type == KEY_TYPE_reservation ||
+ bkey_extent_is_unwritten(k);
+}
+
+static inline struct bch_devs_list bch2_bkey_devs(struct bkey_s_c k)
+{
+ struct bch_devs_list ret = (struct bch_devs_list) { 0 };
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(p, ptr)
+ ret.devs[ret.nr++] = ptr->dev;
+
+ return ret;
+}
+
+static inline struct bch_devs_list bch2_bkey_dirty_devs(struct bkey_s_c k)
+{
+ struct bch_devs_list ret = (struct bch_devs_list) { 0 };
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(p, ptr)
+ if (!ptr->cached)
+ ret.devs[ret.nr++] = ptr->dev;
+
+ return ret;
+}
+
+static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
+{
+ struct bch_devs_list ret = (struct bch_devs_list) { 0 };
+ struct bkey_ptrs_c p = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ bkey_for_each_ptr(p, ptr)
+ if (ptr->cached)
+ ret.devs[ret.nr++] = ptr->dev;
+
+ return ret;
+}
+
+static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ return BCH_DATA_btree;
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ return BCH_DATA_user;
+ case KEY_TYPE_stripe: {
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
+ BUG_ON(ptr < s.v->ptrs ||
+ ptr >= s.v->ptrs + s.v->nr_blocks);
+
+ return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
+ ? BCH_DATA_parity
+ : BCH_DATA_user;
+ }
+ default:
+ BUG();
+ }
+}
+
+unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
+unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
+unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
+bool bch2_bkey_is_incompressible(struct bkey_s_c);
+unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
+
+unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
+unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
+unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
+
+void bch2_bkey_drop_device(struct bkey_s, unsigned);
+void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
+
+const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c, unsigned);
+
+static inline struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s k, unsigned dev)
+{
+ return (void *) bch2_bkey_has_device_c(k.s_c, dev);
+}
+
+bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
+
+void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
+
+static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
+{
+ struct bch_extent_ptr *dest;
+
+ EBUG_ON(bch2_bkey_has_device(bkey_i_to_s(k), ptr.dev));
+
+ switch (k->k.type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ case KEY_TYPE_extent:
+ EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
+
+ ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
+ dest = (struct bch_extent_ptr *)((void *) &k->v + bkey_val_bytes(&k->k));
+ *dest = ptr;
+ k->k.u64s++;
+ break;
+ default:
+ BUG();
+ }
+}
+
+void bch2_extent_ptr_decoded_append(struct bkey_i *,
+ struct extent_ptr_decoded *);
+union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s,
+ struct bch_extent_ptr *);
+union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
+ struct bch_extent_ptr *);
+
+#define bch2_bkey_drop_ptrs(_k, _ptr, _cond) \
+do { \
+ struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k); \
+ \
+ _ptr = &_ptrs.start->ptr; \
+ \
+ while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) { \
+ if (_cond) { \
+ _ptr = (void *) bch2_bkey_drop_ptr(_k, _ptr); \
+ _ptrs = bch2_bkey_ptrs(_k); \
+ continue; \
+ } \
+ \
+ (_ptr)++; \
+ } \
+} while (0)
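+
+/*
+ * Illustrative usage sketch (not in the original patch): drop every cached
+ * pointer from a key, leaving only dirty pointers:
+ *
+ *	struct bch_extent_ptr *ptr;
+ *
+ *	bch2_bkey_drop_ptrs(bkey_i_to_s(k), ptr, ptr->cached);
+ */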
+
+bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
+ struct bch_extent_ptr, u64);
+bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
+struct bch_extent_ptr *
+bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);
+
+void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
+
+bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
+void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+
+void bch2_ptr_swab(struct bkey_s);
+
+/* Generic extent code: */
+
+enum bch_extent_overlap {
+ BCH_EXTENT_OVERLAP_ALL = 0,
+ BCH_EXTENT_OVERLAP_BACK = 1,
+ BCH_EXTENT_OVERLAP_FRONT = 2,
+ BCH_EXTENT_OVERLAP_MIDDLE = 3,
+};
+
+/* Returns how k overlaps with m */
+static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
+ const struct bkey *m)
+{
+ int cmp1 = bkey_lt(k->p, m->p);
+ int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
+
+ return (cmp1 << 1) + cmp2;
+}
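+
+/*
+ * Illustrative note (not in the original patch): cmp1 is set when @k ends
+ * before @m does, cmp2 when @k starts after @m does. So cmp1=1, cmp2=0 means
+ * @k covers the front of @m (BCH_EXTENT_OVERLAP_FRONT == 2), and
+ * cmp1=1, cmp2=1 means @k lies strictly inside @m (BCH_EXTENT_OVERLAP_MIDDLE).
+ */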
+
+int bch2_cut_front_s(struct bpos, struct bkey_s);
+int bch2_cut_back_s(struct bpos, struct bkey_s);
+
+static inline void bch2_cut_front(struct bpos where, struct bkey_i *k)
+{
+ bch2_cut_front_s(where, bkey_i_to_s(k));
+}
+
+static inline void bch2_cut_back(struct bpos where, struct bkey_i *k)
+{
+ bch2_cut_back_s(where, bkey_i_to_s(k));
+}
+
+/**
+ * bch2_key_resize - adjust size of @k
+ *
+ * bkey_start_offset(k) is preserved; only where the extent ends is modified.
+ */
+static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
+{
+ k->p.offset -= k->size;
+ k->p.offset += new_size;
+ k->size = new_size;
+}
+
+/*
+ * In extent_sort_fix_overlapping(), insert_fixup_extent(),
+ * extent_merge_inline() - we're modifying keys in place that are packed. To do
+ * that we have to unpack the key, modify the unpacked key - then this helper
+ * copies/repacks the unpacked key back to the original as necessary.
+ */
+static inline void extent_save(struct btree *b, struct bkey_packed *dst,
+ struct bkey *src)
+{
+ struct bkey_format *f = &b->format;
+ struct bkey_i *dst_unpacked;
+
+ if ((dst_unpacked = packed_to_bkey(dst)))
+ dst_unpacked->k = *src;
+ else
+ BUG_ON(!bch2_bkey_pack_key(dst, src, f));
+}
+
+#endif /* _BCACHEFS_EXTENTS_H */
diff --git a/fs/bcachefs/extents_types.h b/fs/bcachefs/extents_types.h
new file mode 100644
index 000000000000..43d6c341ecca
--- /dev/null
+++ b/fs/bcachefs/extents_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_EXTENTS_TYPES_H
+#define _BCACHEFS_EXTENTS_TYPES_H
+
+#include "bcachefs_format.h"
+
+struct bch_extent_crc_unpacked {
+ u32 compressed_size;
+ u32 uncompressed_size;
+ u32 live_size;
+
+ u8 csum_type;
+ u8 compression_type;
+
+ u16 offset;
+
+ u16 nonce;
+
+ struct bch_csum csum;
+};
+
+struct extent_ptr_decoded {
+ unsigned idx;
+ bool has_ec;
+ struct bch_extent_crc_unpacked crc;
+ struct bch_extent_ptr ptr;
+ struct bch_extent_stripe_ptr ec;
+};
+
+struct bch_io_failures {
+ u8 nr;
+ struct bch_dev_io_failures {
+ u8 dev;
+ u8 idx;
+ u8 nr_failed;
+ u8 nr_retries;
+ } devs[BCH_REPLICAS_MAX];
+};
+
+#endif /* _BCACHEFS_EXTENTS_TYPES_H */
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
new file mode 100644
index 000000000000..05429c9631cd
--- /dev/null
+++ b/fs/bcachefs/eytzinger.h
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _EYTZINGER_H
+#define _EYTZINGER_H
+
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "util.h"
+
+/*
+ * Traversal for trees in eytzinger layout - a full binary tree laid out in an
+ * array
+ */
+
+/*
+ * One based indexing version:
+ *
+ * With one based indexing each level of the tree starts at a power of two -
+ * good for cacheline alignment:
+ */
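+
+/*
+ * Illustrative sketch (not in the original patch): with size == 7, a sorted
+ * sequence a < b < ... < g is stored as
+ *
+ *	index:	1  2  3  4  5  6  7
+ *	elem:	d  b  f  a  c  e  g
+ *
+ * index 1 is the root, 2-3 the next level, 4-7 the leaves - each level
+ * starts at a power of two.
+ */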
+
+static inline unsigned eytzinger1_child(unsigned i, unsigned child)
+{
+ EBUG_ON(child > 1);
+
+ return (i << 1) + child;
+}
+
+static inline unsigned eytzinger1_left_child(unsigned i)
+{
+ return eytzinger1_child(i, 0);
+}
+
+static inline unsigned eytzinger1_right_child(unsigned i)
+{
+ return eytzinger1_child(i, 1);
+}
+
+static inline unsigned eytzinger1_first(unsigned size)
+{
+ return rounddown_pow_of_two(size);
+}
+
+static inline unsigned eytzinger1_last(unsigned size)
+{
+ return rounddown_pow_of_two(size + 1) - 1;
+}
+
+/*
+ * eytzinger1_next() and eytzinger1_prev() have the nice properties that
+ *
+ * eytzinger1_next(0) == eytzinger1_first()
+ * eytzinger1_prev(0) == eytzinger1_last()
+ *
+ * eytzinger1_prev(eytzinger1_first()) == 0
+ * eytzinger1_next(eytzinger1_last()) == 0
+ */
+
+static inline unsigned eytzinger1_next(unsigned i, unsigned size)
+{
+ EBUG_ON(i > size);
+
+ if (eytzinger1_right_child(i) <= size) {
+ i = eytzinger1_right_child(i);
+
+ i <<= __fls(size + 1) - __fls(i);
+ i >>= i > size;
+ } else {
+ i >>= ffz(i) + 1;
+ }
+
+ return i;
+}
+
+static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
+{
+ EBUG_ON(i > size);
+
+ if (eytzinger1_left_child(i) <= size) {
+ i = eytzinger1_left_child(i) + 1;
+
+ i <<= __fls(size + 1) - __fls(i);
+ i -= 1;
+ i >>= i > size;
+ } else {
+ i >>= __ffs(i) + 1;
+ }
+
+ return i;
+}
+
+static inline unsigned eytzinger1_extra(unsigned size)
+{
+ return (size + 1 - rounddown_pow_of_two(size)) << 1;
+}
+
+static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,
+ unsigned extra)
+{
+ unsigned b = __fls(i);
+ unsigned shift = __fls(size) - b;
+ int s;
+
+ EBUG_ON(!i || i > size);
+
+ i ^= 1U << b;
+ i <<= 1;
+ i |= 1;
+ i <<= shift;
+
+ /*
+ * sign bit trick:
+ *
+ * if (i > extra)
+ * i -= (i - extra) >> 1;
+ */
+ s = extra - i;
+ i += (s >> 1) & (s >> 31);
+
+ return i;
+}
+
+static inline unsigned __inorder_to_eytzinger1(unsigned i, unsigned size,
+ unsigned extra)
+{
+ unsigned shift;
+ int s;
+
+ EBUG_ON(!i || i > size);
+
+ /*
+ * sign bit trick:
+ *
+ * if (i > extra)
+ * i += i - extra;
+ */
+ s = extra - i;
+ i -= s & (s >> 31);
+
+ shift = __ffs(i);
+
+ i >>= shift + 1;
+ i |= 1U << (__fls(size) - shift);
+
+ return i;
+}
+
+static inline unsigned eytzinger1_to_inorder(unsigned i, unsigned size)
+{
+ return __eytzinger1_to_inorder(i, size, eytzinger1_extra(size));
+}
+
+static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
+{
+ return __inorder_to_eytzinger1(i, size, eytzinger1_extra(size));
+}
+
+#define eytzinger1_for_each(_i, _size) \
+ for ((_i) = eytzinger1_first((_size)); \
+ (_i) != 0; \
+ (_i) = eytzinger1_next((_i), (_size)))
+
+/* Zero based indexing version: */
+
+static inline unsigned eytzinger0_child(unsigned i, unsigned child)
+{
+ EBUG_ON(child > 1);
+
+ return (i << 1) + 1 + child;
+}
+
+static inline unsigned eytzinger0_left_child(unsigned i)
+{
+ return eytzinger0_child(i, 0);
+}
+
+static inline unsigned eytzinger0_right_child(unsigned i)
+{
+ return eytzinger0_child(i, 1);
+}
+
+static inline unsigned eytzinger0_first(unsigned size)
+{
+ return eytzinger1_first(size) - 1;
+}
+
+static inline unsigned eytzinger0_last(unsigned size)
+{
+ return eytzinger1_last(size) - 1;
+}
+
+static inline unsigned eytzinger0_next(unsigned i, unsigned size)
+{
+ return eytzinger1_next(i + 1, size) - 1;
+}
+
+static inline unsigned eytzinger0_prev(unsigned i, unsigned size)
+{
+ return eytzinger1_prev(i + 1, size) - 1;
+}
+
+static inline unsigned eytzinger0_extra(unsigned size)
+{
+ return eytzinger1_extra(size);
+}
+
+static inline unsigned __eytzinger0_to_inorder(unsigned i, unsigned size,
+ unsigned extra)
+{
+ return __eytzinger1_to_inorder(i + 1, size, extra) - 1;
+}
+
+static inline unsigned __inorder_to_eytzinger0(unsigned i, unsigned size,
+ unsigned extra)
+{
+ return __inorder_to_eytzinger1(i + 1, size, extra) - 1;
+}
+
+static inline unsigned eytzinger0_to_inorder(unsigned i, unsigned size)
+{
+ return __eytzinger0_to_inorder(i, size, eytzinger0_extra(size));
+}
+
+static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
+{
+ return __inorder_to_eytzinger0(i, size, eytzinger0_extra(size));
+}
+
+#define eytzinger0_for_each(_i, _size) \
+ for ((_i) = eytzinger0_first((_size)); \
+ (_i) != -1; \
+ (_i) = eytzinger0_next((_i), (_size)))
+
+typedef int (*eytzinger_cmp_fn)(const void *l, const void *r, size_t size);
+
+/* return greatest node <= @search, or -1 if not found */
+static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
+ eytzinger_cmp_fn cmp, const void *search)
+{
+ unsigned i, n = 0;
+
+ if (!nr)
+ return -1;
+
+ do {
+ i = n;
+ n = eytzinger0_child(i, cmp(search, base + i * size, size) >= 0);
+ } while (n < nr);
+
+ if (n & 1) {
+ /* @i was greater than @search, return previous node: */
+
+ if (i == eytzinger0_first(nr))
+ return -1;
+
+ return eytzinger0_prev(i, nr);
+ } else {
+ return i;
+ }
+}
+
+#define eytzinger0_find(base, nr, size, _cmp, search) \
+({ \
+ void *_base = (base); \
+ void *_search = (search); \
+ size_t _nr = (nr); \
+ size_t _size = (size); \
+ size_t _i = 0; \
+ int _res; \
+ \
+ while (_i < _nr && \
+ (_res = _cmp(_search, _base + _i * _size, _size))) \
+ _i = eytzinger0_child(_i, _res > 0); \
+ _i; \
+})
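+
+/*
+ * Illustrative usage sketch (not in the original patch, names hypothetical):
+ * exact-match lookup in an eytzinger-ordered array of ints:
+ *
+ *	static int cmp_int_e(const void *l, const void *r, size_t size)
+ *	{
+ *		return *(const int *) l - *(const int *) r;
+ *	}
+ *
+ *	int needle = 7;
+ *	size_t idx = eytzinger0_find(arr, nr, sizeof(int), cmp_int_e, &needle);
+ *	if (idx < nr)
+ *		... found at arr[idx] ...
+ */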
+
+void eytzinger0_sort(void *, size_t, size_t,
+ int (*cmp_func)(const void *, const void *, size_t),
+ void (*swap_func)(void *, void *, size_t));
+
+#endif /* _EYTZINGER_H */
diff --git a/fs/bcachefs/fifo.h b/fs/bcachefs/fifo.h
new file mode 100644
index 000000000000..66b945be10c2
--- /dev/null
+++ b/fs/bcachefs/fifo.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FIFO_H
+#define _BCACHEFS_FIFO_H
+
+#include "util.h"
+
+#define FIFO(type) \
+struct { \
+ size_t front, back, size, mask; \
+ type *data; \
+}
+
+#define DECLARE_FIFO(type, name) FIFO(type) name
+
+#define fifo_buf_size(fifo) \
+ ((fifo)->size \
+ ? roundup_pow_of_two((fifo)->size) * sizeof((fifo)->data[0]) \
+ : 0)
+
+#define init_fifo(fifo, _size, _gfp) \
+({ \
+ (fifo)->front = (fifo)->back = 0; \
+ (fifo)->size = (_size); \
+ (fifo)->mask = (fifo)->size \
+ ? roundup_pow_of_two((fifo)->size) - 1 \
+ : 0; \
+ (fifo)->data = kvpmalloc(fifo_buf_size(fifo), (_gfp)); \
+})
+
+#define free_fifo(fifo) \
+do { \
+ kvpfree((fifo)->data, fifo_buf_size(fifo)); \
+ (fifo)->data = NULL; \
+} while (0)
+
+#define fifo_swap(l, r) \
+do { \
+ swap((l)->front, (r)->front); \
+ swap((l)->back, (r)->back); \
+ swap((l)->size, (r)->size); \
+ swap((l)->mask, (r)->mask); \
+ swap((l)->data, (r)->data); \
+} while (0)
+
+#define fifo_move(dest, src) \
+do { \
+ typeof(*((dest)->data)) _t; \
+ while (!fifo_full(dest) && \
+ fifo_pop(src, _t)) \
+ fifo_push(dest, _t); \
+} while (0)
+
+#define fifo_used(fifo) (((fifo)->back - (fifo)->front))
+#define fifo_free(fifo) ((fifo)->size - fifo_used(fifo))
+
+#define fifo_empty(fifo) ((fifo)->front == (fifo)->back)
+#define fifo_full(fifo) (fifo_used(fifo) == (fifo)->size)
+
+#define fifo_peek_front(fifo) ((fifo)->data[(fifo)->front & (fifo)->mask])
+#define fifo_peek_back(fifo) ((fifo)->data[((fifo)->back - 1) & (fifo)->mask])
+
+#define fifo_entry_idx_abs(fifo, p) \
+ ((((p) >= &fifo_peek_front(fifo) \
+ ? (fifo)->front : (fifo)->back) & ~(fifo)->mask) + \
+ (((p) - (fifo)->data)))
+
+#define fifo_entry_idx(fifo, p) (((p) - &fifo_peek_front(fifo)) & (fifo)->mask)
+#define fifo_idx_entry(fifo, i) ((fifo)->data[((fifo)->front + (i)) & (fifo)->mask])
+
+#define fifo_push_back_ref(f) \
+ (fifo_full((f)) ? NULL : &(f)->data[(f)->back++ & (f)->mask])
+
+#define fifo_push_front_ref(f) \
+ (fifo_full((f)) ? NULL : &(f)->data[--(f)->front & (f)->mask])
+
+#define fifo_push_back(fifo, new) \
+({ \
+ typeof((fifo)->data) _r = fifo_push_back_ref(fifo); \
+ if (_r) \
+ *_r = (new); \
+ _r != NULL; \
+})
+
+#define fifo_push_front(fifo, new) \
+({ \
+ typeof((fifo)->data) _r = fifo_push_front_ref(fifo); \
+ if (_r) \
+ *_r = (new); \
+ _r != NULL; \
+})
+
+#define fifo_pop_front(fifo, i) \
+({ \
+ bool _r = !fifo_empty((fifo)); \
+ if (_r) \
+ (i) = (fifo)->data[(fifo)->front++ & (fifo)->mask]; \
+ _r; \
+})
+
+#define fifo_pop_back(fifo, i) \
+({ \
+ bool _r = !fifo_empty((fifo)); \
+ if (_r) \
+ (i) = (fifo)->data[--(fifo)->back & (fifo)->mask]; \
+ _r; \
+})
+
+#define fifo_push_ref(fifo) fifo_push_back_ref(fifo)
+#define fifo_push(fifo, i) fifo_push_back(fifo, (i))
+#define fifo_pop(fifo, i) fifo_pop_front(fifo, (i))
+#define fifo_peek(fifo) fifo_peek_front(fifo)
+
+#define fifo_for_each_entry(_entry, _fifo, _iter) \
+ for (typecheck(typeof((_fifo)->front), _iter), \
+ (_iter) = (_fifo)->front; \
+ ((_iter != (_fifo)->back) && \
+ (_entry = (_fifo)->data[(_iter) & (_fifo)->mask], true)); \
+ (_iter)++)
+
+#define fifo_for_each_entry_ptr(_ptr, _fifo, _iter) \
+ for (typecheck(typeof((_fifo)->front), _iter), \
+ (_iter) = (_fifo)->front; \
+ ((_iter != (_fifo)->back) && \
+ (_ptr = &(_fifo)->data[(_iter) & (_fifo)->mask], true)); \
+ (_iter)++)
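+
+/*
+ * Illustrative usage sketch (not in the original patch, names hypothetical):
+ * a FIFO of u64s, showing the push/pop round trip:
+ *
+ *	FIFO(u64) seqs;
+ *	u64 seq;
+ *
+ *	if (!init_fifo(&seqs, 8, GFP_KERNEL))
+ *		return -ENOMEM;
+ *
+ *	fifo_push(&seqs, 42);
+ *	while (fifo_pop(&seqs, seq))
+ *		pr_debug("seq %llu\n", seq);
+ *
+ *	free_fifo(&seqs);
+ */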
+
+#endif /* _BCACHEFS_FIFO_H */
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
new file mode 100644
index 000000000000..bb5305441f27
--- /dev/null
+++ b/fs/bcachefs/fs-common.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "acl.h"
+#include "btree_update.h"
+#include "dirent.h"
+#include "fs-common.h"
+#include "inode.h"
+#include "subvolume.h"
+#include "xattr.h"
+
+#include <linux/posix_acl.h>
+
+static inline int is_subdir_for_nlink(struct bch_inode_unpacked *inode)
+{
+ return S_ISDIR(inode->bi_mode) && !inode->bi_subvol;
+}
+
+int bch2_create_trans(struct btree_trans *trans,
+ subvol_inum dir,
+ struct bch_inode_unpacked *dir_u,
+ struct bch_inode_unpacked *new_inode,
+ const struct qstr *name,
+ uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
+ struct posix_acl *default_acl,
+ struct posix_acl *acl,
+ subvol_inum snapshot_src,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter dir_iter = { NULL };
+ struct btree_iter inode_iter = { NULL };
+ subvol_inum new_inum = dir;
+ u64 now = bch2_current_time(c);
+ u64 cpu = raw_smp_processor_id();
+ u64 dir_target;
+ u32 snapshot;
+ unsigned dir_type = mode_to_type(mode);
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ if (!(flags & BCH_CREATE_SNAPSHOT)) {
+ /* Normal create path - allocate a new inode: */
+ bch2_inode_init_late(new_inode, now, uid, gid, mode, rdev, dir_u);
+
+ if (flags & BCH_CREATE_TMPFILE)
+ new_inode->bi_flags |= BCH_INODE_UNLINKED;
+
+ ret = bch2_inode_create(trans, &inode_iter, new_inode, snapshot, cpu);
+ if (ret)
+ goto err;
+
+ snapshot_src = (subvol_inum) { 0 };
+ } else {
+ /*
+ * Creating a snapshot - we're not allocating a new inode, but
+ * we do have to lookup the root inode of the subvolume we're
+ * snapshotting and update it (in the new snapshot):
+ */
+
+ if (!snapshot_src.inum) {
+ /* Inode wasn't specified, just snapshot: */
+ struct bch_subvolume s;
+
+ ret = bch2_subvolume_get(trans, snapshot_src.subvol, true,
+ BTREE_ITER_CACHED, &s);
+ if (ret)
+ goto err;
+
+ snapshot_src.inum = le64_to_cpu(s.inode);
+ }
+
+ ret = bch2_inode_peek(trans, &inode_iter, new_inode, snapshot_src,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ if (new_inode->bi_subvol != snapshot_src.subvol) {
+ /* Not a subvolume root: */
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * If we're not root, we have to own the subvolume being
+ * snapshotted:
+ */
+ if (uid && new_inode->bi_uid != uid) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ flags |= BCH_CREATE_SUBVOL;
+ }
+
+ new_inum.inum = new_inode->bi_inum;
+ dir_target = new_inode->bi_inum;
+
+ if (flags & BCH_CREATE_SUBVOL) {
+ u32 new_subvol, dir_snapshot;
+
+ ret = bch2_subvolume_create(trans, new_inode->bi_inum,
+ snapshot_src.subvol,
+ &new_subvol, &snapshot,
+ (flags & BCH_CREATE_SNAPSHOT_RO) != 0);
+ if (ret)
+ goto err;
+
+ new_inode->bi_parent_subvol = dir.subvol;
+ new_inode->bi_subvol = new_subvol;
+ new_inum.subvol = new_subvol;
+ dir_target = new_subvol;
+ dir_type = DT_SUBVOL;
+
+ ret = bch2_subvolume_get_snapshot(trans, dir.subvol, &dir_snapshot);
+ if (ret)
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot);
+ ret = bch2_btree_iter_traverse(&dir_iter);
+ if (ret)
+ goto err;
+ }
+
+ if (!(flags & BCH_CREATE_SNAPSHOT)) {
+ if (default_acl) {
+ ret = bch2_set_acl_trans(trans, new_inum, new_inode,
+ default_acl, ACL_TYPE_DEFAULT);
+ if (ret)
+ goto err;
+ }
+
+ if (acl) {
+ ret = bch2_set_acl_trans(trans, new_inum, new_inode,
+ acl, ACL_TYPE_ACCESS);
+ if (ret)
+ goto err;
+ }
+ }
+
+ if (!(flags & BCH_CREATE_TMPFILE)) {
+ struct bch_hash_info dir_hash = bch2_hash_info_init(c, dir_u);
+ u64 dir_offset;
+
+ if (is_subdir_for_nlink(new_inode))
+ dir_u->bi_nlink++;
+ dir_u->bi_mtime = dir_u->bi_ctime = now;
+
+ ret = bch2_inode_write(trans, &dir_iter, dir_u);
+ if (ret)
+ goto err;
+
+ ret = bch2_dirent_create(trans, dir, &dir_hash,
+ dir_type,
+ name,
+ dir_target,
+ &dir_offset,
+ BCH_HASH_SET_MUST_CREATE);
+ if (ret)
+ goto err;
+
+ if (c->sb.version >= bcachefs_metadata_version_inode_backpointers) {
+ new_inode->bi_dir = dir_u->bi_inum;
+ new_inode->bi_dir_offset = dir_offset;
+ }
+ }
+
+ inode_iter.flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+ bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
+
+ ret = bch2_btree_iter_traverse(&inode_iter) ?:
+ bch2_inode_write(trans, &inode_iter, new_inode);
+err:
+ bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(trans, &dir_iter);
+ return ret;
+}
+
+int bch2_link_trans(struct btree_trans *trans,
+ subvol_inum dir, struct bch_inode_unpacked *dir_u,
+ subvol_inum inum, struct bch_inode_unpacked *inode_u,
+ const struct qstr *name)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter dir_iter = { NULL };
+ struct btree_iter inode_iter = { NULL };
+ struct bch_hash_info dir_hash;
+ u64 now = bch2_current_time(c);
+ u64 dir_offset = 0;
+ int ret;
+
+ if (dir.subvol != inum.subvol)
+ return -EXDEV;
+
+ ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ inode_u->bi_ctime = now;
+ ret = bch2_inode_nlink_inc(inode_u);
+	if (ret)
+		goto err;
+
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ if (bch2_reinherit_attrs(inode_u, dir_u)) {
+ ret = -EXDEV;
+ goto err;
+ }
+
+ dir_u->bi_mtime = dir_u->bi_ctime = now;
+
+ dir_hash = bch2_hash_info_init(c, dir_u);
+
+ ret = bch2_dirent_create(trans, dir, &dir_hash,
+ mode_to_type(inode_u->bi_mode),
+ name, inum.inum, &dir_offset,
+ BCH_HASH_SET_MUST_CREATE);
+ if (ret)
+ goto err;
+
+ if (c->sb.version >= bcachefs_metadata_version_inode_backpointers) {
+ inode_u->bi_dir = dir.inum;
+ inode_u->bi_dir_offset = dir_offset;
+ }
+
+ ret = bch2_inode_write(trans, &dir_iter, dir_u) ?:
+ bch2_inode_write(trans, &inode_iter, inode_u);
+err:
+ bch2_trans_iter_exit(trans, &dir_iter);
+ bch2_trans_iter_exit(trans, &inode_iter);
+ return ret;
+}
+
+int bch2_unlink_trans(struct btree_trans *trans,
+ subvol_inum dir,
+ struct bch_inode_unpacked *dir_u,
+ struct bch_inode_unpacked *inode_u,
+ const struct qstr *name,
+ bool deleting_snapshot)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter dir_iter = { NULL };
+ struct btree_iter dirent_iter = { NULL };
+ struct btree_iter inode_iter = { NULL };
+ struct bch_hash_info dir_hash;
+ subvol_inum inum;
+ u64 now = bch2_current_time(c);
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ dir_hash = bch2_hash_info_init(c, dir_u);
+
+ ret = __bch2_dirent_lookup_trans(trans, &dirent_iter, dir, &dir_hash,
+ name, &inum, BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ if (!deleting_snapshot && S_ISDIR(inode_u->bi_mode)) {
+ ret = bch2_empty_dir_trans(trans, inum);
+ if (ret)
+ goto err;
+ }
+
+ if (deleting_snapshot && !inode_u->bi_subvol) {
+ ret = -BCH_ERR_ENOENT_not_subvol;
+ goto err;
+ }
+
+ if (deleting_snapshot || inode_u->bi_subvol) {
+ ret = bch2_subvolume_unlink(trans, inode_u->bi_subvol);
+ if (ret)
+ goto err;
+
+ k = bch2_btree_iter_peek_slot(&dirent_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ /*
+ * If we're deleting a subvolume, we need to really delete the
+ * dirent, not just emit a whiteout in the current snapshot:
+ */
+ bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(&dirent_iter);
+ if (ret)
+ goto err;
+ } else {
+ bch2_inode_nlink_dec(trans, inode_u);
+ }
+
+ if (inode_u->bi_dir == dirent_iter.pos.inode &&
+ inode_u->bi_dir_offset == dirent_iter.pos.offset) {
+ inode_u->bi_dir = 0;
+ inode_u->bi_dir_offset = 0;
+ }
+
+ dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now;
+ dir_u->bi_nlink -= is_subdir_for_nlink(inode_u);
+
+ ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
+ &dir_hash, &dirent_iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_inode_write(trans, &dir_iter, dir_u) ?:
+ bch2_inode_write(trans, &inode_iter, inode_u);
+err:
+ bch2_trans_iter_exit(trans, &inode_iter);
+ bch2_trans_iter_exit(trans, &dirent_iter);
+ bch2_trans_iter_exit(trans, &dir_iter);
+ return ret;
+}
+
+bool bch2_reinherit_attrs(struct bch_inode_unpacked *dst_u,
+ struct bch_inode_unpacked *src_u)
+{
+ u64 src, dst;
+ unsigned id;
+ bool ret = false;
+
+ for (id = 0; id < Inode_opt_nr; id++) {
+ /* Skip attributes that were explicitly set on this inode */
+ if (dst_u->bi_fields_set & (1 << id))
+ continue;
+
+ src = bch2_inode_opt_get(src_u, id);
+ dst = bch2_inode_opt_get(dst_u, id);
+
+ if (src == dst)
+ continue;
+
+ bch2_inode_opt_set(dst_u, id, src);
+ ret = true;
+ }
+
+ return ret;
+}
+
+int bch2_rename_trans(struct btree_trans *trans,
+ subvol_inum src_dir, struct bch_inode_unpacked *src_dir_u,
+ subvol_inum dst_dir, struct bch_inode_unpacked *dst_dir_u,
+ struct bch_inode_unpacked *src_inode_u,
+ struct bch_inode_unpacked *dst_inode_u,
+ const struct qstr *src_name,
+ const struct qstr *dst_name,
+ enum bch_rename_mode mode)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter src_dir_iter = { NULL };
+ struct btree_iter dst_dir_iter = { NULL };
+ struct btree_iter src_inode_iter = { NULL };
+ struct btree_iter dst_inode_iter = { NULL };
+ struct bch_hash_info src_hash, dst_hash;
+ subvol_inum src_inum, dst_inum;
+ u64 src_offset, dst_offset;
+ u64 now = bch2_current_time(c);
+ int ret;
+
+ ret = bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ src_hash = bch2_hash_info_init(c, src_dir_u);
+
+ if (dst_dir.inum != src_dir.inum ||
+ dst_dir.subvol != src_dir.subvol) {
+ ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ dst_hash = bch2_hash_info_init(c, dst_dir_u);
+ } else {
+ dst_dir_u = src_dir_u;
+ dst_hash = src_hash;
+ }
+
+ ret = bch2_dirent_rename(trans,
+ src_dir, &src_hash,
+ dst_dir, &dst_hash,
+ src_name, &src_inum, &src_offset,
+ dst_name, &dst_inum, &dst_offset,
+ mode);
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_peek(trans, &src_inode_iter, src_inode_u, src_inum,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+
+ if (dst_inum.inum) {
+ ret = bch2_inode_peek(trans, &dst_inode_iter, dst_inode_u, dst_inum,
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto err;
+ }
+
+ if (c->sb.version >= bcachefs_metadata_version_inode_backpointers) {
+ src_inode_u->bi_dir = dst_dir_u->bi_inum;
+ src_inode_u->bi_dir_offset = dst_offset;
+
+ if (mode == BCH_RENAME_EXCHANGE) {
+ dst_inode_u->bi_dir = src_dir_u->bi_inum;
+ dst_inode_u->bi_dir_offset = src_offset;
+ }
+
+ if (mode == BCH_RENAME_OVERWRITE &&
+ dst_inode_u->bi_dir == dst_dir_u->bi_inum &&
+ dst_inode_u->bi_dir_offset == src_offset) {
+ dst_inode_u->bi_dir = 0;
+ dst_inode_u->bi_dir_offset = 0;
+ }
+ }
+
+ if (mode == BCH_RENAME_OVERWRITE) {
+ if (S_ISDIR(src_inode_u->bi_mode) !=
+ S_ISDIR(dst_inode_u->bi_mode)) {
+ ret = -ENOTDIR;
+ goto err;
+ }
+
+ if (S_ISDIR(dst_inode_u->bi_mode) &&
+ bch2_empty_dir_trans(trans, dst_inum)) {
+ ret = -ENOTEMPTY;
+ goto err;
+ }
+ }
+
+ if (bch2_reinherit_attrs(src_inode_u, dst_dir_u) &&
+ S_ISDIR(src_inode_u->bi_mode)) {
+ ret = -EXDEV;
+ goto err;
+ }
+
+ if (mode == BCH_RENAME_EXCHANGE &&
+ bch2_reinherit_attrs(dst_inode_u, src_dir_u) &&
+ S_ISDIR(dst_inode_u->bi_mode)) {
+ ret = -EXDEV;
+ goto err;
+ }
+
+ if (is_subdir_for_nlink(src_inode_u)) {
+ src_dir_u->bi_nlink--;
+ dst_dir_u->bi_nlink++;
+ }
+
+ if (dst_inum.inum && is_subdir_for_nlink(dst_inode_u)) {
+ dst_dir_u->bi_nlink--;
+ src_dir_u->bi_nlink += mode == BCH_RENAME_EXCHANGE;
+ }
+
+ if (mode == BCH_RENAME_OVERWRITE)
+ bch2_inode_nlink_dec(trans, dst_inode_u);
+
+ src_dir_u->bi_mtime = now;
+ src_dir_u->bi_ctime = now;
+
+ if (src_dir.inum != dst_dir.inum) {
+ dst_dir_u->bi_mtime = now;
+ dst_dir_u->bi_ctime = now;
+ }
+
+ src_inode_u->bi_ctime = now;
+
+ if (dst_inum.inum)
+ dst_inode_u->bi_ctime = now;
+
+ ret = bch2_inode_write(trans, &src_dir_iter, src_dir_u) ?:
+ (src_dir.inum != dst_dir.inum
+ ? bch2_inode_write(trans, &dst_dir_iter, dst_dir_u)
+ : 0) ?:
+ bch2_inode_write(trans, &src_inode_iter, src_inode_u) ?:
+ (dst_inum.inum
+ ? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u)
+ : 0);
+err:
+ bch2_trans_iter_exit(trans, &dst_inode_iter);
+ bch2_trans_iter_exit(trans, &src_inode_iter);
+ bch2_trans_iter_exit(trans, &dst_dir_iter);
+ bch2_trans_iter_exit(trans, &src_dir_iter);
+ return ret;
+}
diff --git a/fs/bcachefs/fs-common.h b/fs/bcachefs/fs-common.h
new file mode 100644
index 000000000000..dde237859514
--- /dev/null
+++ b/fs/bcachefs/fs-common.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_COMMON_H
+#define _BCACHEFS_FS_COMMON_H
+
+struct posix_acl;
+
+#define BCH_CREATE_TMPFILE (1U << 0)
+#define BCH_CREATE_SUBVOL (1U << 1)
+#define BCH_CREATE_SNAPSHOT (1U << 2)
+#define BCH_CREATE_SNAPSHOT_RO (1U << 3)
+
+int bch2_create_trans(struct btree_trans *, subvol_inum,
+ struct bch_inode_unpacked *,
+ struct bch_inode_unpacked *,
+ const struct qstr *,
+ uid_t, gid_t, umode_t, dev_t,
+ struct posix_acl *,
+ struct posix_acl *,
+ subvol_inum, unsigned);
+
+int bch2_link_trans(struct btree_trans *,
+ subvol_inum, struct bch_inode_unpacked *,
+ subvol_inum, struct bch_inode_unpacked *,
+ const struct qstr *);
+
+int bch2_unlink_trans(struct btree_trans *, subvol_inum,
+ struct bch_inode_unpacked *,
+ struct bch_inode_unpacked *,
+ const struct qstr *, bool);
+
+int bch2_rename_trans(struct btree_trans *,
+ subvol_inum, struct bch_inode_unpacked *,
+ subvol_inum, struct bch_inode_unpacked *,
+ struct bch_inode_unpacked *,
+ struct bch_inode_unpacked *,
+ const struct qstr *,
+ const struct qstr *,
+ enum bch_rename_mode);
+
+bool bch2_reinherit_attrs(struct bch_inode_unpacked *,
+ struct bch_inode_unpacked *);
+
+#endif /* _BCACHEFS_FS_COMMON_H */
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
new file mode 100644
index 000000000000..58ccc7b91ac7
--- /dev/null
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -0,0 +1,1093 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_buf.h"
+#include "fs-io.h"
+#include "fs-io-buffered.h"
+#include "fs-io-direct.h"
+#include "fs-io-pagecache.h"
+#include "io_read.h"
+#include "io_write.h"
+
+#include <linux/backing-dev.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+
+static inline bool bio_full(struct bio *bio, unsigned len)
+{
+ if (bio->bi_vcnt >= bio->bi_max_vecs)
+ return true;
+ if (bio->bi_iter.bi_size > UINT_MAX - len)
+ return true;
+ return false;
+}
+
+/* readpage(s): */
+
+static void bch2_readpages_end_io(struct bio *bio)
+{
+ struct folio_iter fi;
+
+ bio_for_each_folio_all(fi, bio) {
+ if (!bio->bi_status) {
+ folio_mark_uptodate(fi.folio);
+ } else {
+ folio_clear_uptodate(fi.folio);
+ folio_set_error(fi.folio);
+ }
+ folio_unlock(fi.folio);
+ }
+
+ bio_put(bio);
+}
+
+struct readpages_iter {
+ struct address_space *mapping;
+ unsigned idx;
+ folios folios;
+};
+
+static int readpages_iter_init(struct readpages_iter *iter,
+ struct readahead_control *ractl)
+{
+ struct folio **fi;
+ int ret;
+
+ memset(iter, 0, sizeof(*iter));
+
+ iter->mapping = ractl->mapping;
+
+ ret = bch2_filemap_get_contig_folios_d(iter->mapping,
+ ractl->_index << PAGE_SHIFT,
+ (ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
+ 0, mapping_gfp_mask(iter->mapping),
+ &iter->folios);
+ if (ret)
+ return ret;
+
+ darray_for_each(iter->folios, fi) {
+ ractl->_nr_pages -= 1U << folio_order(*fi);
+ __bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
+ folio_put(*fi);
+ folio_put(*fi);
+ }
+
+ return 0;
+}
+
+static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
+{
+ if (iter->idx >= iter->folios.nr)
+ return NULL;
+ return iter->folios.data[iter->idx];
+}
+
+static inline void readpage_iter_advance(struct readpages_iter *iter)
+{
+ iter->idx++;
+}
+
+static bool extent_partial_reads_expensive(struct bkey_s_c k)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_extent_crc_unpacked crc;
+ const union bch_extent_entry *i;
+
+ bkey_for_each_crc(k.k, ptrs, crc, i)
+ if (crc.csum_type || crc.compression_type)
+ return true;
+ return false;
+}
+
+static int readpage_bio_extend(struct btree_trans *trans,
+ struct readpages_iter *iter,
+ struct bio *bio,
+ unsigned sectors_this_extent,
+ bool get_more)
+{
+ /* Don't hold btree locks while allocating memory: */
+ bch2_trans_unlock(trans);
+
+ while (bio_sectors(bio) < sectors_this_extent &&
+ bio->bi_vcnt < bio->bi_max_vecs) {
+ struct folio *folio = readpage_iter_peek(iter);
+ int ret;
+
+ if (folio) {
+ readpage_iter_advance(iter);
+ } else {
+ pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;
+
+ if (!get_more)
+ break;
+
+ folio = xa_load(&iter->mapping->i_pages, folio_offset);
+ if (folio && !xa_is_value(folio))
+ break;
+
+ folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
+ if (!folio)
+ break;
+
+ if (!__bch2_folio_create(folio, GFP_KERNEL)) {
+ folio_put(folio);
+ break;
+ }
+
+ ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
+ if (ret) {
+ __bch2_folio_release(folio);
+ folio_put(folio);
+ break;
+ }
+
+ folio_put(folio);
+ }
+
+ BUG_ON(folio_sector(folio) != bio_end_sector(bio));
+
+ BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
+ }
+
+ return bch2_trans_relock(trans);
+}
+
+static void bchfs_read(struct btree_trans *trans,
+ struct bch_read_bio *rbio,
+ subvol_inum inum,
+ struct readpages_iter *readpages_iter)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_buf sk;
+ int flags = BCH_READ_RETRY_IF_STALE|
+ BCH_READ_MAY_PROMOTE;
+ u32 snapshot;
+ int ret = 0;
+
+ rbio->c = c;
+ rbio->start_time = local_clock();
+ rbio->subvol = inum.subvol;
+
+ bch2_bkey_buf_init(&sk);
+retry:
+ bch2_trans_begin(trans);
+ iter = (struct btree_iter) { NULL };
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
+ BTREE_ITER_SLOTS);
+ while (1) {
+ struct bkey_s_c k;
+ unsigned bytes, sectors, offset_into_extent;
+ enum btree_id data_btree = BTREE_ID_extents;
+
+ /*
+ * read_extent -> io_time_reset may cause a transaction restart
+ * without returning an error, we need to check for that here:
+ */
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ break;
+
+ bch2_btree_iter_set_pos(&iter,
+ POS(inum.inum, rbio->bio.bi_iter.bi_sector));
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ offset_into_extent = iter.pos.offset -
+ bkey_start_offset(k.k);
+ sectors = k.k->size - offset_into_extent;
+
+ bch2_bkey_buf_reassemble(&sk, c, k);
+
+ ret = bch2_read_indirect_extent(trans, &data_btree,
+ &offset_into_extent, &sk);
+ if (ret)
+ break;
+
+ k = bkey_i_to_s_c(sk.k);
+
+ sectors = min(sectors, k.k->size - offset_into_extent);
+
+ if (readpages_iter) {
+ ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
+ extent_partial_reads_expensive(k));
+ if (ret)
+ break;
+ }
+
+ bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
+ swap(rbio->bio.bi_iter.bi_size, bytes);
+
+ if (rbio->bio.bi_iter.bi_size == bytes)
+ flags |= BCH_READ_LAST_FRAGMENT;
+
+ bch2_bio_page_state_set(&rbio->bio, k);
+
+ bch2_read_extent(trans, rbio, iter.pos,
+ data_btree, k, offset_into_extent, flags);
+
+ if (flags & BCH_READ_LAST_FRAGMENT)
+ break;
+
+ swap(rbio->bio.bi_iter.bi_size, bytes);
+ bio_advance(&rbio->bio, bytes);
+
+ ret = btree_trans_too_many_iters(trans);
+ if (ret)
+ break;
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ if (ret) {
+ bch_err_inum_offset_ratelimited(c,
+ iter.pos.inode,
+ iter.pos.offset << 9,
+ "read error %i from btree lookup", ret);
+ rbio->bio.bi_status = BLK_STS_IOERR;
+ bio_endio(&rbio->bio);
+ }
+
+ bch2_bkey_buf_exit(&sk, c);
+}
+
+void bch2_readahead(struct readahead_control *ractl)
+{
+ struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_io_opts opts;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct folio *folio;
+ struct readpages_iter readpages_iter;
+ int ret;
+
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ ret = readpages_iter_init(&readpages_iter, ractl);
+ BUG_ON(ret);
+
+ bch2_pagecache_add_get(inode);
+
+ while ((folio = readpage_iter_peek(&readpages_iter))) {
+ unsigned n = min_t(unsigned,
+ readpages_iter.folios.nr -
+ readpages_iter.idx,
+ BIO_MAX_VECS);
+ struct bch_read_bio *rbio =
+ rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+ GFP_KERNEL, &c->bio_read),
+ opts);
+
+ readpage_iter_advance(&readpages_iter);
+
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+ rbio->bio.bi_end_io = bch2_readpages_end_io;
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
+
+ bchfs_read(trans, rbio, inode_inum(inode),
+ &readpages_iter);
+ bch2_trans_unlock(trans);
+ }
+
+ bch2_pagecache_add_put(inode);
+
+ bch2_trans_put(trans);
+ darray_exit(&readpages_iter.folios);
+}
+
+static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
+ subvol_inum inum, struct folio *folio)
+{
+ bch2_folio_create(folio, __GFP_NOFAIL);
+
+ rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
+ rbio->bio.bi_iter.bi_sector = folio_sector(folio);
+ BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));
+
+ bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0));
+}
+
+static void bch2_read_single_folio_end_io(struct bio *bio)
+{
+ complete(bio->bi_private);
+}
+
+int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
+{
+ struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_read_bio *rbio;
+ struct bch_io_opts opts;
+ int ret;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
+ opts);
+ rbio->bio.bi_private = &done;
+ rbio->bio.bi_end_io = bch2_read_single_folio_end_io;
+
+ __bchfs_readfolio(c, rbio, inode_inum(inode), folio);
+ wait_for_completion(&done);
+
+ ret = blk_status_to_errno(rbio->bio.bi_status);
+ bio_put(&rbio->bio);
+
+ if (ret < 0)
+ return ret;
+
+ folio_mark_uptodate(folio);
+ return 0;
+}
+
+int bch2_read_folio(struct file *file, struct folio *folio)
+{
+ int ret;
+
+ ret = bch2_read_single_folio(folio, folio->mapping);
+ folio_unlock(folio);
+ return bch2_err_class(ret);
+}
+
+/* writepages: */
+
+struct bch_writepage_io {
+ struct bch_inode_info *inode;
+
+ /* must be last: */
+ struct bch_write_op op;
+};
+
+struct bch_writepage_state {
+ struct bch_writepage_io *io;
+ struct bch_io_opts opts;
+ struct bch_folio_sector *tmp;
+ unsigned tmp_sectors;
+};
+
+static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct bch_writepage_state ret = { 0 };
+
+ bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
+ return ret;
+}
+
+static void bch2_writepage_io_done(struct bch_write_op *op)
+{
+ struct bch_writepage_io *io =
+ container_of(op, struct bch_writepage_io, op);
+ struct bch_fs *c = io->op.c;
+ struct bio *bio = &io->op.wbio.bio;
+ struct folio_iter fi;
+ unsigned i;
+
+ if (io->op.error) {
+ set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
+
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s;
+
+ folio_set_error(fi.folio);
+ mapping_set_error(fi.folio->mapping, -EIO);
+
+ s = __bch2_folio(fi.folio);
+ spin_lock(&s->lock);
+ for (i = 0; i < folio_sectors(fi.folio); i++)
+ s->s[i].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+ }
+
+ if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s;
+
+ s = __bch2_folio(fi.folio);
+ spin_lock(&s->lock);
+ for (i = 0; i < folio_sectors(fi.folio); i++)
+ s->s[i].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+ }
+
+ /*
+ * racing with fallocate can cause us to add fewer sectors than
+ * expected - but we shouldn't add more sectors than expected:
+ */
+ WARN_ON_ONCE(io->op.i_sectors_delta > 0);
+
+	/*
+	 * XXX: an error (due to going RO) halfway through a page can violate
+	 * this slightly, which is why the assertion below is disabled:
+	 *
+	 * BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
+	 */
+
+ /*
+ * PageWriteback is effectively our ref on the inode - fixup i_blocks
+ * before calling end_page_writeback:
+ */
+ bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
+
+ bio_for_each_folio_all(fi, bio) {
+ struct bch_folio *s = __bch2_folio(fi.folio);
+
+ if (atomic_dec_and_test(&s->write_count))
+ folio_end_writeback(fi.folio);
+ }
+
+ bio_put(&io->op.wbio.bio);
+}
+
+static void bch2_writepage_do_io(struct bch_writepage_state *w)
+{
+ struct bch_writepage_io *io = w->io;
+
+ w->io = NULL;
+ closure_call(&io->op.cl, bch2_write, NULL, NULL);
+}
+
+/*
+ * Allocate a new bch_writepage_io for @w: the caller (__bch2_writepage())
+ * appends to an existing one when possible and only calls this to start a
+ * new one:
+ */
+static void bch2_writepage_io_alloc(struct bch_fs *c,
+ struct writeback_control *wbc,
+ struct bch_writepage_state *w,
+ struct bch_inode_info *inode,
+ u64 sector,
+ unsigned nr_replicas)
+{
+ struct bch_write_op *op;
+
+ w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+ REQ_OP_WRITE,
+ GFP_KERNEL,
+ &c->writepage_bioset),
+ struct bch_writepage_io, op.wbio.bio);
+
+ w->io->inode = inode;
+ op = &w->io->op;
+ bch2_write_op_init(op, c, w->opts);
+ op->target = w->opts.foreground_target;
+ op->nr_replicas = nr_replicas;
+ op->res.nr_replicas = nr_replicas;
+ op->write_point = writepoint_hashed(inode->ei_last_dirtied);
+ op->subvol = inode->ei_subvol;
+ op->pos = POS(inode->v.i_ino, sector);
+ op->end_io = bch2_writepage_io_done;
+ op->devs_need_flush = &inode->ei_devs_need_flush;
+ op->wbio.bio.bi_iter.bi_sector = sector;
+ op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
+}
+
+static int __bch2_writepage(struct folio *folio,
+ struct writeback_control *wbc,
+ void *data)
+{
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_writepage_state *w = data;
+ struct bch_folio *s;
+ unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
+ loff_t i_size = i_size_read(&inode->v);
+ int ret;
+
+ EBUG_ON(!folio_test_uptodate(folio));
+
+ /* Is the folio fully inside i_size? */
+ if (folio_end_pos(folio) <= i_size)
+ goto do_io;
+
+ /* Is the folio fully outside i_size? (truncate in progress) */
+ if (folio_pos(folio) >= i_size) {
+ folio_unlock(folio);
+ return 0;
+ }
+
+ /*
+ * The folio straddles i_size. It must be zeroed out on each and every
+ * writepage invocation because it may be mmapped. "A file is mapped
+ * in multiples of the folio size. For a file that is not a multiple of
+ * the folio size, the remaining memory is zeroed when mapped, and
+ * writes to that region are not written out to the file."
+ */
+ folio_zero_segment(folio,
+ i_size - folio_pos(folio),
+ folio_size(folio));
+do_io:
+ f_sectors = folio_sectors(folio);
+ s = bch2_folio(folio);
+
+ if (f_sectors > w->tmp_sectors) {
+ kfree(w->tmp);
+		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector),
+				 GFP_NOFS|__GFP_NOFAIL);
+ w->tmp_sectors = f_sectors;
+ }
+
+	/*
+	 * Things get really hairy with errors during writeback; we don't have
+	 * a good way of handling them here, so the reservation is taken with
+	 * check_enospc=false (i.e. BCH_DISK_RESERVATION_NOFAIL) and asserted
+	 * to succeed:
+	 */
+ ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
+ BUG_ON(ret);
+
+ /* Before unlocking the page, get copy of reservations: */
+ spin_lock(&s->lock);
+ memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);
+
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
+ continue;
+
+ nr_replicas_this_write =
+ min_t(unsigned, nr_replicas_this_write,
+ s->s[i].nr_replicas +
+ s->s[i].replicas_reserved);
+ }
+
+ for (i = 0; i < f_sectors; i++) {
+ if (s->s[i].state < SECTOR_dirty)
+ continue;
+
+ s->s[i].nr_replicas = w->opts.compression
+ ? 0 : nr_replicas_this_write;
+
+ s->s[i].replicas_reserved = 0;
+ bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
+ }
+ spin_unlock(&s->lock);
+
+ BUG_ON(atomic_read(&s->write_count));
+ atomic_set(&s->write_count, 1);
+
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
+
+ folio_unlock(folio);
+
+ offset = 0;
+ while (1) {
+ unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
+ u64 sector;
+
+ while (offset < f_sectors &&
+ w->tmp[offset].state < SECTOR_dirty)
+ offset++;
+
+ if (offset == f_sectors)
+ break;
+
+ while (offset + sectors < f_sectors &&
+ w->tmp[offset + sectors].state >= SECTOR_dirty) {
+ reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
+ dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
+ sectors++;
+ }
+ BUG_ON(!sectors);
+
+ sector = folio_sector(folio) + offset;
+
+ if (w->io &&
+ (w->io->op.res.nr_replicas != nr_replicas_this_write ||
+ bio_full(&w->io->op.wbio.bio, sectors << 9) ||
+ w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
+ (BIO_MAX_VECS * PAGE_SIZE) ||
+ bio_end_sector(&w->io->op.wbio.bio) != sector))
+ bch2_writepage_do_io(w);
+
+ if (!w->io)
+ bch2_writepage_io_alloc(c, wbc, w, inode, sector,
+ nr_replicas_this_write);
+
+ atomic_inc(&s->write_count);
+
+ BUG_ON(inode != w->io->inode);
+ BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
+ sectors << 9, offset << 9));
+
+ /* Check for writing past i_size: */
+ WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
+ round_up(i_size, block_bytes(c)) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
+ "writing past i_size: %llu > %llu (unrounded %llu)\n",
+ bio_end_sector(&w->io->op.wbio.bio) << 9,
+ round_up(i_size, block_bytes(c)),
+ i_size);
+
+ w->io->op.res.sectors += reserved_sectors;
+ w->io->op.i_sectors_delta -= dirty_sectors;
+ w->io->op.new_i_size = i_size;
+
+ offset += sectors;
+ }
+
+ if (atomic_dec_and_test(&s->write_count))
+ folio_end_writeback(folio);
+
+ return 0;
+}
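+
+/*
+ * A minimal sketch of the write_count protocol used above (illustrative
+ * only): write_count is primed to 1 while the folio is locked, each chunk
+ * added to a bio takes another ref, and the final atomic_dec_and_test() -
+ * whether it happens here or in bch2_writepage_io_done() - ends writeback
+ * exactly once:
+ *
+ *	atomic_set(&s->write_count, 1);		// writeback begins
+ *	atomic_inc(&s->write_count);		// per chunk added to a bio
+ *	if (atomic_dec_and_test(&s->write_count))
+ *		folio_end_writeback(folio);	// last ref ends writeback
+ */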
+
+int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+ struct bch_fs *c = mapping->host->i_sb->s_fs_info;
+ struct bch_writepage_state w =
+ bch_writepage_state_init(c, to_bch_ei(mapping->host));
+ struct blk_plug plug;
+ int ret;
+
+ blk_start_plug(&plug);
+ ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
+ if (w.io)
+ bch2_writepage_do_io(&w);
+ blk_finish_plug(&plug);
+ kfree(w.tmp);
+ return bch2_err_class(ret);
+}
+
+/* buffered writes: */
+
+int bch2_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct page **pagep, void **fsdata)
+{
+ struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation *res;
+ struct folio *folio;
+ unsigned offset;
+ int ret = -ENOMEM;
+
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ bch2_folio_reservation_init(c, inode, res);
+ *fsdata = res;
+
+ bch2_pagecache_add_get(inode);
+
+ folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
+ FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR_OR_NULL(folio))
+ goto err_unlock;
+
+ offset = pos - folio_pos(folio);
+ len = min_t(size_t, len, folio_end_pos(folio) - pos);
+
+ if (folio_test_uptodate(folio))
+ goto out;
+
+	/* If we're writing the entire folio, we don't need to read it in first: */
+ if (!offset && len == folio_size(folio))
+ goto out;
+
+ if (!offset && pos + len >= inode->v.i_size) {
+ folio_zero_segment(folio, len, folio_size(folio));
+ flush_dcache_folio(folio);
+ goto out;
+ }
+
+ if (folio_pos(folio) >= inode->v.i_size) {
+ folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
+ flush_dcache_folio(folio);
+ goto out;
+ }
+readpage:
+ ret = bch2_read_single_folio(folio, mapping);
+ if (ret)
+ goto err;
+out:
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto err;
+
+ ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
+ if (ret) {
+ if (!folio_test_uptodate(folio)) {
+			/*
+			 * If the folio hasn't been read in, we won't know if
+			 * we actually need a reservation - we don't need the
+			 * data itself, just to check whether the folio is
+			 * fully backed by uncompressed data:
+			 */
+ goto readpage;
+ }
+
+ goto err;
+ }
+
+ *pagep = &folio->page;
+ return 0;
+err:
+ folio_unlock(folio);
+ folio_put(folio);
+ *pagep = NULL;
+err_unlock:
+ bch2_pagecache_add_put(inode);
+ kfree(res);
+ *fsdata = NULL;
+ return bch2_err_class(ret);
+}
+
+int bch2_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct bch_inode_info *inode = to_bch_ei(mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation *res = fsdata;
+ struct folio *folio = page_folio(page);
+ unsigned offset = pos - folio_pos(folio);
+
+ lockdep_assert_held(&inode->v.i_rwsem);
+ BUG_ON(offset + copied > folio_size(folio));
+
+ if (unlikely(copied < len && !folio_test_uptodate(folio))) {
+ /*
+ * The folio needs to be read in, but that would destroy
+ * our partial write - simplest thing is to just force
+ * userspace to redo the write:
+ */
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
+ copied = 0;
+ }
+
+ spin_lock(&inode->v.i_lock);
+ if (pos + copied > inode->v.i_size)
+ i_size_write(&inode->v, pos + copied);
+ spin_unlock(&inode->v.i_lock);
+
+ if (copied) {
+ if (!folio_test_uptodate(folio))
+ folio_mark_uptodate(folio);
+
+ bch2_set_folio_dirty(c, inode, folio, res, offset, copied);
+
+ inode->ei_last_dirtied = (unsigned long) current;
+ }
+
+ folio_unlock(folio);
+ folio_put(folio);
+ bch2_pagecache_add_put(inode);
+
+ bch2_folio_reservation_put(c, inode, res);
+ kfree(res);
+
+ return copied;
+}
+
+static noinline void folios_trunc(folios *fs, struct folio **fi)
+{
+ while (fs->data + fs->nr > fi) {
+ struct folio *f = darray_pop(fs);
+
+ folio_unlock(f);
+ folio_put(f);
+ }
+}
+
+static int __bch2_buffered_write(struct bch_inode_info *inode,
+ struct address_space *mapping,
+ struct iov_iter *iter,
+ loff_t pos, unsigned len)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation res;
+ folios fs;
+ struct folio **fi, *f;
+ unsigned copied = 0, f_offset, f_copied;
+ u64 end = pos + len, f_pos, f_len;
+ loff_t last_folio_pos = inode->v.i_size;
+ int ret = 0;
+
+ BUG_ON(!len);
+
+ bch2_folio_reservation_init(c, inode, &res);
+ darray_init(&fs);
+
+ ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
+ FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
+ mapping_gfp_mask(mapping),
+ &fs);
+ if (ret)
+ goto out;
+
+ BUG_ON(!fs.nr);
+
+ f = darray_first(fs);
+ if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
+ ret = bch2_read_single_folio(f, mapping);
+ if (ret)
+ goto out;
+ }
+
+ f = darray_last(fs);
+ end = min(end, folio_end_pos(f));
+ last_folio_pos = folio_pos(f);
+ if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
+ if (end >= inode->v.i_size) {
+ folio_zero_range(f, 0, folio_size(f));
+ } else {
+ ret = bch2_read_single_folio(f, mapping);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
+ if (ret)
+ goto out;
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(fs));
+ darray_for_each(fs, fi) {
+ f = *fi;
+ f_len = min(end, folio_end_pos(f)) - f_pos;
+
+ /*
+ * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
+ * supposed to write as much as we have disk space for.
+ *
+ * On failure here we should still write out a partial page if
+ * we aren't completely out of disk space - we don't do that
+ * yet:
+ */
+ ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
+ if (unlikely(ret)) {
+ folios_trunc(&fs, fi);
+ if (!fs.nr)
+ goto out;
+
+ end = min(end, folio_end_pos(darray_last(fs)));
+ break;
+ }
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
+ }
+
+ if (mapping_writably_mapped(mapping))
+ darray_for_each(fs, fi)
+ flush_dcache_folio(*fi);
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(fs));
+ darray_for_each(fs, fi) {
+ f = *fi;
+ f_len = min(end, folio_end_pos(f)) - f_pos;
+ f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
+ if (!f_copied) {
+ folios_trunc(&fs, fi);
+ break;
+ }
+
+ if (!folio_test_uptodate(f) &&
+ f_copied != folio_size(f) &&
+ pos + copied + f_copied < inode->v.i_size) {
+ iov_iter_revert(iter, f_copied);
+ folio_zero_range(f, 0, folio_size(f));
+ folios_trunc(&fs, fi);
+ break;
+ }
+
+ flush_dcache_folio(f);
+ copied += f_copied;
+
+ if (f_copied != f_len) {
+ folios_trunc(&fs, fi + 1);
+ break;
+ }
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
+ }
+
+ if (!copied)
+ goto out;
+
+ end = pos + copied;
+
+ spin_lock(&inode->v.i_lock);
+ if (end > inode->v.i_size)
+ i_size_write(&inode->v, end);
+ spin_unlock(&inode->v.i_lock);
+
+ f_pos = pos;
+ f_offset = pos - folio_pos(darray_first(fs));
+ darray_for_each(fs, fi) {
+ f = *fi;
+ f_len = min(end, folio_end_pos(f)) - f_pos;
+
+ if (!folio_test_uptodate(f))
+ folio_mark_uptodate(f);
+
+ bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);
+
+ f_pos = folio_end_pos(f);
+ f_offset = 0;
+ }
+
+ inode->ei_last_dirtied = (unsigned long) current;
+out:
+ darray_for_each(fs, fi) {
+ folio_unlock(*fi);
+ folio_put(*fi);
+ }
+
+ /*
+ * If the last folio added to the mapping starts beyond current EOF, we
+ * performed a short write but left around at least one post-EOF folio.
+ * Clean up the mapping before we return.
+ */
+ if (last_folio_pos >= inode->v.i_size)
+ truncate_pagecache(&inode->v, inode->v.i_size);
+
+ darray_exit(&fs);
+ bch2_folio_reservation_put(c, inode, &res);
+
+ return copied ?: ret;
+}
+
+static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ loff_t pos = iocb->ki_pos;
+ ssize_t written = 0;
+ int ret = 0;
+
+ bch2_pagecache_add_get(inode);
+
+ do {
+ unsigned offset = pos & (PAGE_SIZE - 1);
+ unsigned bytes = iov_iter_count(iter);
+again:
+ /*
+ * Bring in the user page that we will copy from _first_.
+ * Otherwise there's a nasty deadlock on copying from the
+ * same page as we're writing to, without it being marked
+ * up-to-date.
+ *
+	 * Not only is this an optimisation - it's also required to
+	 * check that the address is actually valid, since we use
+	 * atomic usercopies below.
+ */
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
+ bytes = min_t(unsigned long, iov_iter_count(iter),
+ PAGE_SIZE - offset);
+
+ if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
+ ret = -EFAULT;
+ break;
+ }
+ }
+
+ if (unlikely(fatal_signal_pending(current))) {
+ ret = -EINTR;
+ break;
+ }
+
+ ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
+ if (unlikely(ret < 0))
+ break;
+
+ cond_resched();
+
+ if (unlikely(ret == 0)) {
+ /*
+ * If we were unable to copy any data at all, we must
+ * fall back to a single segment length write.
+ *
+ * If we didn't fallback here, we could livelock
+ * because not all segments in the iov can be copied at
+ * once without a pagefault.
+ */
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_single_seg_count(iter));
+ goto again;
+ }
+ pos += ret;
+ written += ret;
+ ret = 0;
+
+ balance_dirty_pages_ratelimited(mapping);
+ } while (iov_iter_count(iter));
+
+ bch2_pagecache_add_put(inode);
+
+ return written ? written : ret;
+}
+
+ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ ssize_t ret;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = bch2_direct_write(iocb, from);
+ goto out;
+ }
+
+ inode_lock(&inode->v);
+
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0)
+ goto unlock;
+
+ ret = file_remove_privs(file);
+ if (ret)
+ goto unlock;
+
+ ret = file_update_time(file);
+ if (ret)
+ goto unlock;
+
+ ret = bch2_buffered_write(iocb, from);
+ if (likely(ret > 0))
+ iocb->ki_pos += ret;
+unlock:
+ inode_unlock(&inode->v);
+
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+out:
+ return bch2_err_class(ret);
+}
+
+void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
+{
+ bioset_exit(&c->writepage_bioset);
+}
+
+int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
+{
+ if (bioset_init(&c->writepage_bioset,
+ 4, offsetof(struct bch_writepage_io, op.wbio.bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+ return 0;
+}
+
+#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io-buffered.h b/fs/bcachefs/fs-io-buffered.h
new file mode 100644
index 000000000000..a6126ff790e6
--- /dev/null
+++ b/fs/bcachefs/fs-io-buffered.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_IO_BUFFERED_H
+#define _BCACHEFS_FS_IO_BUFFERED_H
+
+#ifndef NO_BCACHEFS_FS
+
+int bch2_read_single_folio(struct folio *, struct address_space *);
+int bch2_read_folio(struct file *, struct folio *);
+
+int bch2_writepages(struct address_space *, struct writeback_control *);
+void bch2_readahead(struct readahead_control *);
+
+int bch2_write_begin(struct file *, struct address_space *, loff_t,
+ unsigned, struct page **, void **);
+int bch2_write_end(struct file *, struct address_space *, loff_t,
+ unsigned, unsigned, struct page *, void *);
+
+ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
+
+void bch2_fs_fs_io_buffered_exit(struct bch_fs *);
+int bch2_fs_fs_io_buffered_init(struct bch_fs *);
+#else
+static inline void bch2_fs_fs_io_buffered_exit(struct bch_fs *c) {}
+static inline int bch2_fs_fs_io_buffered_init(struct bch_fs *c) { return 0; }
+#endif
+
+#endif /* _BCACHEFS_FS_IO_BUFFERED_H */
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
new file mode 100644
index 000000000000..6a9557e7ecab
--- /dev/null
+++ b/fs/bcachefs/fs-io-direct.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "fs.h"
+#include "fs-io.h"
+#include "fs-io-direct.h"
+#include "fs-io-pagecache.h"
+#include "io_read.h"
+#include "io_write.h"
+
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+#include <linux/prefetch.h>
+#include <linux/task_io_accounting_ops.h>
+
+/* O_DIRECT reads */
+
+struct dio_read {
+ struct closure cl;
+ struct kiocb *req;
+ long ret;
+ bool should_dirty;
+ struct bch_read_bio rbio;
+};
+
+static void bio_check_or_release(struct bio *bio, bool check_dirty)
+{
+ if (check_dirty) {
+ bio_check_pages_dirty(bio);
+ } else {
+ bio_release_pages(bio, false);
+ bio_put(bio);
+ }
+}
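+
+/*
+ * Usage sketch (illustrative only): when the pages were marked dirty before
+ * submission, bio_check_pages_dirty() re-dirties any that were cleaned while
+ * the read was in flight and releases the bio itself, which is why that
+ * branch has no bio_put():
+ *
+ *	if (dio->should_dirty)
+ *		bio_set_pages_dirty(bio);	// before submitting the read
+ *	...
+ *	bio_check_or_release(bio, dio->should_dirty);	// at completion
+ */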
+
+static void bch2_dio_read_complete(struct closure *cl)
+{
+ struct dio_read *dio = container_of(cl, struct dio_read, cl);
+
+ dio->req->ki_complete(dio->req, dio->ret);
+ bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
+}
+
+static void bch2_direct_IO_read_endio(struct bio *bio)
+{
+ struct dio_read *dio = bio->bi_private;
+
+ if (bio->bi_status)
+ dio->ret = blk_status_to_errno(bio->bi_status);
+
+ closure_put(&dio->cl);
+}
+
+static void bch2_direct_IO_read_split_endio(struct bio *bio)
+{
+ struct dio_read *dio = bio->bi_private;
+ bool should_dirty = dio->should_dirty;
+
+ bch2_direct_IO_read_endio(bio);
+ bio_check_or_release(bio, should_dirty);
+}
+
+static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
+{
+ struct file *file = req->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_io_opts opts;
+ struct dio_read *dio;
+ struct bio *bio;
+ loff_t offset = req->ki_pos;
+ bool sync = is_sync_kiocb(req);
+ size_t shorten;
+ ssize_t ret;
+
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ if ((offset|iter->count) & (block_bytes(c) - 1))
+ return -EINVAL;
+
+ ret = min_t(loff_t, iter->count,
+ max_t(loff_t, 0, i_size_read(&inode->v) - offset));
+
+ if (!ret)
+ return ret;
+
+ shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
+ iter->count -= shorten;
+
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
+ &c->dio_read_bioset);
+
+ bio->bi_end_io = bch2_direct_IO_read_endio;
+
+ dio = container_of(bio, struct dio_read, rbio.bio);
+ closure_init(&dio->cl, NULL);
+
+ /*
+ * this is a _really_ horrible hack just to avoid an atomic sub at the
+ * end:
+ */
+ if (!sync) {
+ set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
+ atomic_set(&dio->cl.remaining,
+ CLOSURE_REMAINING_INITIALIZER -
+ CLOSURE_RUNNING +
+ CLOSURE_DESTRUCTOR);
+ } else {
+ atomic_set(&dio->cl.remaining,
+ CLOSURE_REMAINING_INITIALIZER + 1);
+ }
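+
+	/*
+	 * A worked sketch of the bias above (illustrative): with the
+	 * destructor flag set, the final closure_put() - issued by the last
+	 * split bio's completion - invokes bch2_dio_read_complete() directly,
+	 * so no separate atomic sub is needed to detect that all splits have
+	 * finished. In the sync case we instead hold one extra ref for
+	 * closure_sync() below to wait on before dio->ret is read.
+	 */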
+
+ dio->req = req;
+ dio->ret = ret;
+	/*
+	 * This is one of the sketchier things I've encountered: we have to
+	 * skip dirtying the pages of requests that originate within the
+	 * kernel (i.e. from the loopback driver), because we'd deadlock on
+	 * the page lock.
+	 */
+ dio->should_dirty = iter_is_iovec(iter);
+
+ goto start;
+ while (iter->count) {
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_READ,
+ GFP_KERNEL,
+ &c->bio_read);
+ bio->bi_end_io = bch2_direct_IO_read_split_endio;
+start:
+ bio->bi_opf = REQ_OP_READ|REQ_SYNC;
+ bio->bi_iter.bi_sector = offset >> 9;
+ bio->bi_private = dio;
+
+ ret = bio_iov_iter_get_pages(bio, iter);
+ if (ret < 0) {
+ /* XXX: fault inject this path */
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ break;
+ }
+
+ offset += bio->bi_iter.bi_size;
+
+ if (dio->should_dirty)
+ bio_set_pages_dirty(bio);
+
+ if (iter->count)
+ closure_get(&dio->cl);
+
+ bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
+ }
+
+ iter->count += shorten;
+
+ if (sync) {
+ closure_sync(&dio->cl);
+ closure_debug_destroy(&dio->cl);
+ ret = dio->ret;
+ bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
+ return ret;
+ } else {
+ return -EIOCBQUEUED;
+ }
+}
+
+ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct file *file = iocb->ki_filp;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct address_space *mapping = file->f_mapping;
+ size_t count = iov_iter_count(iter);
+ ssize_t ret;
+
+ if (!count)
+ return 0; /* skip atime */
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct blk_plug plug;
+
+ if (unlikely(mapping->nrpages)) {
+ ret = filemap_write_and_wait_range(mapping,
+ iocb->ki_pos,
+ iocb->ki_pos + count - 1);
+ if (ret < 0)
+ goto out;
+ }
+
+ file_accessed(file);
+
+ blk_start_plug(&plug);
+ ret = bch2_direct_IO_read(iocb, iter);
+ blk_finish_plug(&plug);
+
+ if (ret >= 0)
+ iocb->ki_pos += ret;
+ } else {
+ bch2_pagecache_add_get(inode);
+ ret = generic_file_read_iter(iocb, iter);
+ bch2_pagecache_add_put(inode);
+ }
+out:
+ return bch2_err_class(ret);
+}
+
+/* O_DIRECT writes */
+
+struct dio_write {
+ struct kiocb *req;
+ struct address_space *mapping;
+ struct bch_inode_info *inode;
+ struct mm_struct *mm;
+ unsigned loop:1,
+ extending:1,
+ sync:1,
+ flush:1,
+ free_iov:1;
+ struct quota_res quota_res;
+ u64 written;
+
+ struct iov_iter iter;
+ struct iovec inline_vecs[2];
+
+ /* must be last: */
+ struct bch_write_op op;
+};
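+
+/*
+ * Why op must be last (illustrative note): the struct bio embedded in op.wbio
+ * ends in a flexible bi_inline_vecs[] array, and dio_write_bioset is created
+ * with front padding of offsetof(struct dio_write, op.wbio.bio), so the whole
+ * dio is recovered from a plain bioset allocation:
+ *
+ *	bio = bio_alloc_bioset(NULL, nr_vecs, REQ_OP_WRITE, GFP_KERNEL,
+ *			       &c->dio_write_bioset);
+ *	dio = container_of(bio, struct dio_write, op.wbio.bio);
+ */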
+
+static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
+ u64 offset, u64 size,
+ unsigned nr_replicas, bool compressed)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 end = offset + size;
+ u32 snapshot;
+ bool ret = true;
+ int err;
+retry:
+ bch2_trans_begin(trans);
+
+ err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (err)
+ goto err;
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, err) {
+ if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
+ break;
+
+ if (k.k->p.snapshot != snapshot ||
+ nr_replicas > bch2_bkey_replicas(c, k) ||
+ (!compressed && bch2_bkey_sectors_compressed(k))) {
+ ret = false;
+ break;
+ }
+ }
+
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(err, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_put(trans);
+
+ return err ? false : ret;
+}
+
+static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
+
+ return bch2_check_range_allocated(c, inode_inum(inode),
+ dio->op.pos.offset, bio_sectors(bio),
+ dio->op.opts.data_replicas,
+ dio->op.opts.compression != 0);
+}
+
+static void bch2_dio_write_loop_async(struct bch_write_op *);
+static __always_inline long bch2_dio_write_done(struct dio_write *dio);
+
+/*
+ * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
+ * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
+ * caller's stack, and we're not guaranteed that it will live for the duration
+ * of the IO:
+ */
+static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
+{
+ struct iovec *iov = dio->inline_vecs;
+
+ /*
+ * iov_iter has a single embedded iovec - nothing to do:
+ */
+ if (iter_is_ubuf(&dio->iter))
+ return 0;
+
+ /*
+ * We don't currently handle non-iovec iov_iters here - return an error,
+ * and we'll fall back to doing the IO synchronously:
+ */
+ if (!iter_is_iovec(&dio->iter))
+ return -1;
+
+ if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
+ iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
+ GFP_KERNEL);
+ if (unlikely(!iov))
+ return -ENOMEM;
+
+ dio->free_iov = true;
+ }
+
+ memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
+ dio->iter.__iov = iov;
+ return 0;
+}
+
+static void bch2_dio_write_flush_done(struct closure *cl)
+{
+ struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
+ struct bch_fs *c = dio->op.c;
+
+ closure_debug_destroy(cl);
+
+ dio->op.error = bch2_journal_error(&c->journal);
+
+ bch2_dio_write_done(dio);
+}
+
+static noinline void bch2_dio_write_flush(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ dio->flush = 0;
+
+ closure_init(&dio->op.cl, NULL);
+
+ if (!dio->op.error) {
+ ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
+ if (ret) {
+ dio->op.error = ret;
+ } else {
+ bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
+ &dio->op.cl);
+ bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+ }
+ }
+
+ if (dio->sync) {
+ closure_sync(&dio->op.cl);
+ closure_debug_destroy(&dio->op.cl);
+ } else {
+ continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
+ }
+}
+
+static __always_inline long bch2_dio_write_done(struct dio_write *dio)
+{
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
+ bool sync = dio->sync;
+ long ret;
+
+ if (unlikely(dio->flush)) {
+ bch2_dio_write_flush(dio);
+ if (!sync)
+ return -EIOCBQUEUED;
+ }
+
+ bch2_pagecache_block_put(inode);
+
+ if (dio->free_iov)
+ kfree(dio->iter.__iov);
+
+ ret = dio->op.error ?: ((long) dio->written << 9);
+ bio_put(&dio->op.wbio.bio);
+
+ /* inode->i_dio_count is our ref on inode and thus bch_fs */
+ inode_dio_end(&inode->v);
+
+ if (ret < 0)
+ ret = bch2_err_class(ret);
+
+ if (!sync) {
+ req->ki_complete(req, ret);
+ ret = -EIOCBQUEUED;
+ }
+ return ret;
+}
+
+static __always_inline void bch2_dio_write_end(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct bch_inode_info *inode = dio->inode;
+ struct bio *bio = &dio->op.wbio.bio;
+
+ req->ki_pos += (u64) dio->op.written << 9;
+ dio->written += dio->op.written;
+
+ if (dio->extending) {
+ spin_lock(&inode->v.i_lock);
+ if (req->ki_pos > inode->v.i_size)
+ i_size_write(&inode->v, req->ki_pos);
+ spin_unlock(&inode->v.i_lock);
+ }
+
+ if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
+ __bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+
+ bio_release_pages(bio, false);
+
+ if (unlikely(dio->op.error))
+ set_bit(EI_INODE_ERROR, &inode->ei_flags);
+}
+
+static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
+{
+ struct bch_fs *c = dio->op.c;
+ struct kiocb *req = dio->req;
+ struct address_space *mapping = dio->mapping;
+ struct bch_inode_info *inode = dio->inode;
+ struct bch_io_opts opts;
+ struct bio *bio = &dio->op.wbio.bio;
+ unsigned unaligned, iter_count;
+ bool sync = dio->sync, dropped_locks;
+ long ret;
+
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ while (1) {
+ iter_count = dio->iter.count;
+
+ EBUG_ON(current->faults_disabled_mapping);
+ current->faults_disabled_mapping = mapping;
+
+ ret = bio_iov_iter_get_pages(bio, &dio->iter);
+
+ dropped_locks = fdm_dropped_locks();
+
+ current->faults_disabled_mapping = NULL;
+
+ /*
+ * If the fault handler returned an error but also signalled
+ * that it dropped & retook ei_pagecache_lock, we just need to
+ * re-shoot down the page cache and retry:
+ */
+ if (dropped_locks && ret)
+ ret = 0;
+
+ if (unlikely(ret < 0))
+ goto err;
+
+ if (unlikely(dropped_locks)) {
+ ret = bch2_write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter_count - 1);
+ if (unlikely(ret))
+ goto err;
+
+ if (!bio->bi_iter.bi_size)
+ continue;
+ }
+
+ unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
+ bio->bi_iter.bi_size -= unaligned;
+ iov_iter_revert(&dio->iter, unaligned);
+
+ if (!bio->bi_iter.bi_size) {
+ /*
+ * bio_iov_iter_get_pages was only able to get <
+ * blocksize worth of pages:
+ */
+ ret = -EFAULT;
+ goto err;
+ }
+
+ bch2_write_op_init(&dio->op, c, opts);
+ dio->op.end_io = sync
+ ? NULL
+ : bch2_dio_write_loop_async;
+ dio->op.target = dio->op.opts.foreground_target;
+ dio->op.write_point = writepoint_hashed((unsigned long) current);
+ dio->op.nr_replicas = dio->op.opts.data_replicas;
+ dio->op.subvol = inode->ei_subvol;
+ dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+ dio->op.devs_need_flush = &inode->ei_devs_need_flush;
+
+ if (sync)
+ dio->op.flags |= BCH_WRITE_SYNC;
+ dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
+
+ ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
+ bio_sectors(bio), true);
+ if (unlikely(ret))
+ goto err;
+
+ ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
+ dio->op.opts.data_replicas, 0);
+ if (unlikely(ret) &&
+ !bch2_dio_write_check_allocated(dio))
+ goto err;
+
+ task_io_account_write(bio->bi_iter.bi_size);
+
+ if (unlikely(dio->iter.count) &&
+ !dio->sync &&
+ !dio->loop &&
+ bch2_dio_write_copy_iov(dio))
+ dio->sync = sync = true;
+
+ dio->loop = true;
+ closure_call(&dio->op.cl, bch2_write, NULL, NULL);
+
+ if (!sync)
+ return -EIOCBQUEUED;
+
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error)
+ break;
+
+ bio_reset(bio, NULL, REQ_OP_WRITE);
+ }
+out:
+ return bch2_dio_write_done(dio);
+err:
+ dio->op.error = ret;
+
+ bio_release_pages(bio, false);
+
+ bch2_quota_reservation_put(c, inode, &dio->quota_res);
+ goto out;
+}
+
+static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
+{
+ struct mm_struct *mm = dio->mm;
+
+ bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
+
+ if (mm)
+ kthread_use_mm(mm);
+ bch2_dio_write_loop(dio);
+ if (mm)
+ kthread_unuse_mm(mm);
+}
+
+static void bch2_dio_write_loop_async(struct bch_write_op *op)
+{
+ struct dio_write *dio = container_of(op, struct dio_write, op);
+
+ bch2_dio_write_end(dio);
+
+ if (likely(!dio->iter.count) || dio->op.error)
+ bch2_dio_write_done(dio);
+ else
+ bch2_dio_write_continue(dio);
+}
+
+ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
+{
+ struct file *file = req->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct dio_write *dio;
+ struct bio *bio;
+ bool locked = true, extending;
+ ssize_t ret;
+
+ prefetch(&c->opts);
+ prefetch((void *) &c->opts + 64);
+ prefetch(&inode->ei_inode);
+ prefetch((void *) &inode->ei_inode + 64);
+
+ inode_lock(&inode->v);
+
+ ret = generic_write_checks(req, iter);
+ if (unlikely(ret <= 0))
+ goto err;
+
+ ret = file_remove_privs(file);
+ if (unlikely(ret))
+ goto err;
+
+ ret = file_update_time(file);
+ if (unlikely(ret))
+ goto err;
+
+	ret = -EINVAL;
+	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
+		goto err;
+
+ inode_dio_begin(&inode->v);
+ bch2_pagecache_block_get(inode);
+
+ extending = req->ki_pos + iter->count > inode->v.i_size;
+ if (!extending) {
+ inode_unlock(&inode->v);
+ locked = false;
+ }
+
+ bio = bio_alloc_bioset(NULL,
+ bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+ REQ_OP_WRITE,
+ GFP_KERNEL,
+ &c->dio_write_bioset);
+ dio = container_of(bio, struct dio_write, op.wbio.bio);
+ dio->req = req;
+ dio->mapping = mapping;
+ dio->inode = inode;
+ dio->mm = current->mm;
+ dio->loop = false;
+ dio->extending = extending;
+ dio->sync = is_sync_kiocb(req) || extending;
+ dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
+ dio->free_iov = false;
+ dio->quota_res.sectors = 0;
+ dio->written = 0;
+ dio->iter = *iter;
+ dio->op.c = c;
+
+ if (unlikely(mapping->nrpages)) {
+ ret = bch2_write_invalidate_inode_pages_range(mapping,
+ req->ki_pos,
+ req->ki_pos + iter->count - 1);
+ if (unlikely(ret))
+ goto err_put_bio;
+ }
+
+ ret = bch2_dio_write_loop(dio);
+err:
+ if (locked)
+ inode_unlock(&inode->v);
+ return ret;
+err_put_bio:
+ bch2_pagecache_block_put(inode);
+ bio_put(bio);
+ inode_dio_end(&inode->v);
+ goto err;
+}
+
+void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
+{
+ bioset_exit(&c->dio_write_bioset);
+ bioset_exit(&c->dio_read_bioset);
+}
+
+int bch2_fs_fs_io_direct_init(struct bch_fs *c)
+{
+ if (bioset_init(&c->dio_read_bioset,
+ 4, offsetof(struct dio_read, rbio.bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+ if (bioset_init(&c->dio_write_bioset,
+ 4, offsetof(struct dio_write, op.wbio.bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_dio_write_bioset_init;
+
+ return 0;
+}
+
+#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io-direct.h b/fs/bcachefs/fs-io-direct.h
new file mode 100644
index 000000000000..814621ec7f81
--- /dev/null
+++ b/fs/bcachefs/fs-io-direct.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_IO_DIRECT_H
+#define _BCACHEFS_FS_IO_DIRECT_H
+
+#ifndef NO_BCACHEFS_FS
+ssize_t bch2_direct_write(struct kiocb *, struct iov_iter *);
+ssize_t bch2_read_iter(struct kiocb *, struct iov_iter *);
+
+void bch2_fs_fs_io_direct_exit(struct bch_fs *);
+int bch2_fs_fs_io_direct_init(struct bch_fs *);
+#else
+static inline void bch2_fs_fs_io_direct_exit(struct bch_fs *c) {}
+static inline int bch2_fs_fs_io_direct_init(struct bch_fs *c) { return 0; }
+#endif
+
+#endif /* _BCACHEFS_FS_IO_DIRECT_H */
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
new file mode 100644
index 000000000000..8bd9bcdd27f7
--- /dev/null
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
+
+#include "bcachefs.h"
+#include "btree_iter.h"
+#include "extents.h"
+#include "fs-io.h"
+#include "fs-io-pagecache.h"
+#include "subvolume.h"
+
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+
+int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
+ loff_t start, u64 end,
+ int fgp_flags, gfp_t gfp,
+ folios *fs)
+{
+ struct folio *f;
+ u64 pos = start;
+ int ret = 0;
+
+ while (pos < end) {
+ if ((u64) pos >= (u64) start + (1ULL << 20))
+ fgp_flags &= ~FGP_CREAT;
+
+ ret = darray_make_room_gfp(fs, 1, gfp & GFP_KERNEL);
+ if (ret)
+ break;
+
+ f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
+ if (IS_ERR_OR_NULL(f))
+ break;
+
+ BUG_ON(fs->nr && folio_pos(f) != pos);
+
+ pos = folio_end_pos(f);
+ darray_push(fs, f);
+ }
+
+ if (!fs->nr && !ret && (fgp_flags & FGP_CREAT))
+ ret = -ENOMEM;
+
+ return fs->nr ? 0 : ret;
+}
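+
+/*
+ * Usage sketch (illustrative only): on success the darray holds locked,
+ * referenced, contiguous folios that the caller must unlock and put, as
+ * __bch2_buffered_write() does:
+ *
+ *	folios fs;
+ *
+ *	darray_init(&fs);
+ *	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
+ *			FGP_LOCK|FGP_WRITE|FGP_CREAT, GFP_KERNEL, &fs);
+ *	...
+ *	darray_for_each(fs, fi) {
+ *		folio_unlock(*fi);
+ *		folio_put(*fi);
+ *	}
+ *	darray_exit(&fs);
+ */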
+
+/* pagecache_block must be held */
+int bch2_write_invalidate_inode_pages_range(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ int ret;
+
+ /*
+ * XXX: the way this is currently implemented, we can spin if a process
+ * is continually redirtying a specific page
+ */
+ do {
+ if (!mapping->nrpages)
+ return 0;
+
+ ret = filemap_write_and_wait_range(mapping, start, end);
+ if (ret)
+ break;
+
+ if (!mapping->nrpages)
+ return 0;
+
+ ret = invalidate_inode_pages2_range(mapping,
+ start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+ } while (ret == -EBUSY);
+
+ return ret;
+}
+
+#if 0
+/* Useful for debug tracing: */
+static const char * const bch2_folio_sector_states[] = {
+#define x(n) #n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+ NULL
+};
+#endif
+
+static inline enum bch_folio_sector_state
+folio_sector_dirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_dirty;
+ case SECTOR_reserved:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
+
+static inline enum bch_folio_sector_state
+folio_sector_undirty(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_dirty:
+ return SECTOR_unallocated;
+ case SECTOR_dirty_reserved:
+ return SECTOR_reserved;
+ default:
+ return state;
+ }
+}
+
+static inline enum bch_folio_sector_state
+folio_sector_reserve(enum bch_folio_sector_state state)
+{
+ switch (state) {
+ case SECTOR_unallocated:
+ return SECTOR_reserved;
+ case SECTOR_dirty:
+ return SECTOR_dirty_reserved;
+ default:
+ return state;
+ }
+}
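+
+/*
+ * Taken together, the three helpers above implement this small state lattice
+ * (sketch; SECTOR_allocated is unaffected by all of them):
+ *
+ *	unallocated --reserve--> reserved
+ *	unallocated --dirty----> dirty
+ *	reserved    --dirty----> dirty_reserved
+ *	dirty       --reserve--> dirty_reserved
+ *
+ * with folio_sector_undirty() walking the dirty transitions backwards.
+ */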
+
+/* for newly allocated folios: */
+struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
+{
+ struct bch_folio *s;
+
+ s = kzalloc(sizeof(*s) +
+ sizeof(struct bch_folio_sector) *
+ folio_sectors(folio), gfp);
+ if (!s)
+ return NULL;
+
+ spin_lock_init(&s->lock);
+ folio_attach_private(folio, s);
+ return s;
+}
+
+struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
+{
+ return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
+}
+
+static unsigned bkey_to_sector_state(struct bkey_s_c k)
+{
+ if (bkey_extent_is_reservation(k))
+ return SECTOR_reserved;
+ if (bkey_extent_is_allocation(k.k))
+ return SECTOR_allocated;
+ return SECTOR_unallocated;
+}
+
+static void __bch2_folio_set(struct folio *folio,
+ unsigned pg_offset, unsigned pg_len,
+ unsigned nr_ptrs, unsigned state)
+{
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
+
+ BUG_ON(pg_offset >= sectors);
+ BUG_ON(pg_offset + pg_len > sectors);
+
+ spin_lock(&s->lock);
+
+ for (i = pg_offset; i < pg_offset + pg_len; i++) {
+ s->s[i].nr_replicas = nr_ptrs;
+ bch2_folio_sector_set(folio, s, i, state);
+ }
+
+ if (i == sectors)
+ s->uptodate = true;
+
+ spin_unlock(&s->lock);
+}
+
+/*
+ * Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
+ * extents btree:
+ */
+int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
+ struct folio **fs, unsigned nr_folios)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_folio *s;
+ u64 offset = folio_sector(fs[0]);
+ unsigned folio_idx;
+ u32 snapshot;
+ bool need_set = false;
+ int ret;
+
+ for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
+ s = bch2_folio_create(fs[folio_idx], GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ need_set |= !s->uptodate;
+ }
+
+ if (!need_set)
+ return 0;
+
+ folio_idx = 0;
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
+ SPOS(inum.inum, offset, snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
+
+ while (folio_idx < nr_folios) {
+ struct folio *folio = fs[folio_idx];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
+ folio_start;
+ unsigned folio_len = min(k.k->p.offset, folio_end) -
+ folio_offset - folio_start;
+
+ BUG_ON(k.k->p.offset < folio_start);
+ BUG_ON(bkey_start_offset(k.k) > folio_end);
+
+ if (!bch2_folio(folio)->uptodate)
+ __bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
+
+ if (k.k->p.offset < folio_end)
+ break;
+ folio_idx++;
+ }
+
+ if (folio_idx == nr_folios)
+ break;
+ }
+
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
+{
+ struct bvec_iter iter;
+ struct folio_vec fv;
+ unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
+ ? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
+ unsigned state = bkey_to_sector_state(k);
+
+ bio_for_each_folio(fv, bio, iter)
+ __bch2_folio_set(fv.fv_folio,
+ fv.fv_offset >> 9,
+ fv.fv_len >> 9,
+ nr_ptrs, state);
+}
+
+void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
+
+ BUG_ON(end <= folio_start);
+
+ folio_lock(folio);
+ s = bch2_folio(folio);
+
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = folio_offset; j < folio_offset + folio_len; j++)
+ s->s[j].nr_replicas = 0;
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+}
+
+void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
+ u64 start, u64 end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ pgoff_t index = start >> PAGE_SECTORS_SHIFT;
+ pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
+ struct folio_batch fbatch;
+ s64 i_sectors_delta = 0;
+ unsigned i, j;
+
+ if (end <= start)
+ return;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(inode->v.i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+ u64 folio_start = folio_sector(folio);
+ u64 folio_end = folio_end_sector(folio);
+ unsigned folio_offset = max(start, folio_start) - folio_start;
+ unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
+ struct bch_folio *s;
+
+ BUG_ON(end <= folio_start);
+
+ folio_lock(folio);
+ s = bch2_folio(folio);
+
+ if (s) {
+ spin_lock(&s->lock);
+ for (j = folio_offset; j < folio_offset + folio_len; j++) {
+ i_sectors_delta -= s->s[j].state == SECTOR_dirty;
+ bch2_folio_sector_set(folio, s, j,
+ folio_sector_reserve(s->s[j].state));
+ }
+ spin_unlock(&s->lock);
+ }
+
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+}
+
+static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
+ unsigned nr_replicas)
+{
+ return max(0, (int) nr_replicas -
+ s->nr_replicas -
+ s->replicas_reserved);
+}
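+
+/*
+ * Worked example (illustrative): with nr_replicas 2, a sector that already
+ * has one replica on disk and no reservation needs max(0, 2 - 1 - 0) == 1
+ * more replica reserved, while a sector that is fully allocated or fully
+ * reserved needs 0.
+ */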
+
+int bch2_get_folio_disk_reservation(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio, bool check_enospc)
+{
+ struct bch_folio *s = bch2_folio_create(folio, 0);
+ unsigned nr_replicas = inode_nr_replicas(c, inode);
+ struct disk_reservation disk_res = { 0 };
+ unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
+ int ret;
+
+ if (!s)
+ return -ENOMEM;
+
+ for (i = 0; i < sectors; i++)
+ disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
+
+ if (!disk_res_sectors)
+ return 0;
+
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ disk_res_sectors, 1,
+ !check_enospc
+ ? BCH_DISK_RESERVATION_NOFAIL
+ : 0);
+ if (unlikely(ret))
+ return ret;
+
+ for (i = 0; i < sectors; i++)
+ s->s[i].replicas_reserved +=
+ sectors_to_reserve(&s->s[i], nr_replicas);
+
+ return 0;
+}
+
+void bch2_folio_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct bch2_folio_reservation *res)
+{
+ bch2_disk_reservation_put(c, &res->disk);
+ bch2_quota_reservation_put(c, inode, &res->quota);
+}
+
+int bch2_folio_reservation_get(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
+ unsigned offset, unsigned len)
+{
+ struct bch_folio *s = bch2_folio_create(folio, 0);
+ unsigned i, disk_sectors = 0, quota_sectors = 0;
+ int ret;
+
+ if (!s)
+ return -ENOMEM;
+
+ BUG_ON(!s->uptodate);
+
+ for (i = round_down(offset, block_bytes(c)) >> 9;
+ i < round_up(offset + len, block_bytes(c)) >> 9;
+ i++) {
+ disk_sectors += sectors_to_reserve(&s->s[i],
+ res->disk.nr_replicas);
+ quota_sectors += s->s[i].state == SECTOR_unallocated;
+ }
+
+ if (disk_sectors) {
+ ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (quota_sectors) {
+ ret = bch2_quota_reservation_add(c, inode, &res->quota,
+ quota_sectors, true);
+ if (unlikely(ret)) {
+ struct disk_reservation tmp = {
+ .sectors = disk_sectors
+ };
+
+ bch2_disk_reservation_put(c, &tmp);
+ res->disk.sectors -= disk_sectors;
+ return ret;
+ }
+ }
+
+ return 0;
+}
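+
+/*
+ * Worked example for the rounding above (illustrative, assuming 4096-byte
+ * blocks and a sufficiently large folio): a write at byte 5000 within the
+ * folio, of length 100, covers the block range [4096, 8192), i.e. sector
+ * indexes 8..15 - reservations are always taken in whole filesystem blocks:
+ *
+ *	round_down(5000, 4096) >> 9 == 8
+ *	round_up(5000 + 100, 4096) >> 9 == 16
+ */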
+
+static void bch2_clear_folio_bits(struct folio *folio)
+{
+ struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_folio *s = bch2_folio(folio);
+ struct disk_reservation disk_res = { 0 };
+ int i, sectors = folio_sectors(folio), dirty_sectors = 0;
+
+ if (!s)
+ return;
+
+ EBUG_ON(!folio_test_locked(folio));
+ EBUG_ON(folio_test_writeback(folio));
+
+ for (i = 0; i < sectors; i++) {
+ disk_res.sectors += s->s[i].replicas_reserved;
+ s->s[i].replicas_reserved = 0;
+
+ dirty_sectors -= s->s[i].state == SECTOR_dirty;
+ bch2_folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
+ }
+
+ bch2_disk_reservation_put(c, &disk_res);
+
+ bch2_i_sectors_acct(c, inode, NULL, dirty_sectors);
+
+ bch2_folio_release(folio);
+}
+
+void bch2_set_folio_dirty(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct folio *folio,
+ struct bch2_folio_reservation *res,
+ unsigned offset, unsigned len)
+{
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, dirty_sectors = 0;
+
+ WARN_ON((u64) folio_pos(folio) + offset + len >
+ round_up((u64) i_size_read(&inode->v), block_bytes(c)));
+
+ BUG_ON(!s->uptodate);
+
+ spin_lock(&s->lock);
+
+ for (i = round_down(offset, block_bytes(c)) >> 9;
+ i < round_up(offset + len, block_bytes(c)) >> 9;
+ i++) {
+ unsigned sectors = sectors_to_reserve(&s->s[i],
+ res->disk.nr_replicas);
+
+ /*
+ * This can happen if we race with the error path in
+ * bch2_writepage_io_done():
+ */
+ sectors = min_t(unsigned, sectors, res->disk.sectors);
+
+ s->s[i].replicas_reserved += sectors;
+ res->disk.sectors -= sectors;
+
+ dirty_sectors += s->s[i].state == SECTOR_unallocated;
+
+ bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
+ }
+
+ spin_unlock(&s->lock);
+
+ bch2_i_sectors_acct(c, inode, &res->quota, dirty_sectors);
+
+ if (!folio_test_dirty(folio))
+ filemap_dirty_folio(inode->v.i_mapping, folio);
+}
+
+vm_fault_t bch2_page_fault(struct vm_fault *vmf)
+{
+ struct file *file = vmf->vma->vm_file;
+ struct address_space *mapping = file->f_mapping;
+ struct address_space *fdm = faults_disabled_mapping();
+ struct bch_inode_info *inode = file_bch_inode(file);
+ vm_fault_t ret;
+
+ if (fdm == mapping)
+ return VM_FAULT_SIGBUS;
+
+ /* Lock ordering: */
+ if (fdm > mapping) {
+ struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
+
+ if (bch2_pagecache_add_tryget(inode))
+ goto got_lock;
+
+ bch2_pagecache_block_put(fdm_host);
+
+ bch2_pagecache_add_get(inode);
+ bch2_pagecache_add_put(inode);
+
+ bch2_pagecache_block_get(fdm_host);
+
+ /* Signal that lock has been dropped: */
+ set_fdm_dropped_locks();
+ return VM_FAULT_SIGBUS;
+ }
+
+ bch2_pagecache_add_get(inode);
+got_lock:
+ ret = filemap_fault(vmf);
+ bch2_pagecache_add_put(inode);
+
+ return ret;
+}
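+
+/*
+ * Sketch of the lock ordering above (illustrative): a dio write to mapping A
+ * can fault in a user buffer mmapped from mapping B, wanting B's
+ * pagecache_add lock while holding A's pagecache_block lock. Ordering the
+ * two mappings by address - and backing off via set_fdm_dropped_locks() when
+ * we'd otherwise lock out of order - avoids an ABBA deadlock between two
+ * such writes going in opposite directions.
+ */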
+
+vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
+{
+ struct folio *folio = page_folio(vmf->page);
+ struct file *file = vmf->vma->vm_file;
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct address_space *mapping = file->f_mapping;
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch2_folio_reservation res;
+ unsigned len;
+ loff_t isize;
+ vm_fault_t ret;
+
+ bch2_folio_reservation_init(c, inode, &res);
+
+ sb_start_pagefault(inode->v.i_sb);
+ file_update_time(file);
+
+	/*
+	 * Not strictly necessary, but helps avoid dio writes livelocking in
+	 * bch2_write_invalidate_inode_pages_range() - we can drop this if/when
+	 * we get a bch2_write_invalidate_inode_pages_range() that works without
+	 * dropping the folio lock before invalidating the folio:
+	 */
+ bch2_pagecache_add_get(inode);
+
+ folio_lock(folio);
+ isize = i_size_read(&inode->v);
+
+ if (folio->mapping != mapping || folio_pos(folio) >= isize) {
+ folio_unlock(folio);
+ ret = VM_FAULT_NOPAGE;
+ goto out;
+ }
+
+ len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
+
+ if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
+ bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
+ folio_unlock(folio);
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+
+ bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
+ bch2_folio_reservation_put(c, inode, &res);
+
+ folio_wait_stable(folio);
+ ret = VM_FAULT_LOCKED;
+out:
+ bch2_pagecache_add_put(inode);
+ sb_end_pagefault(inode->v.i_sb);
+
+ return ret;
+}
+
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+ if (offset || length < folio_size(folio))
+ return;
+
+ bch2_clear_folio_bits(folio);
+}
+
+bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
+{
+ if (folio_test_dirty(folio) || folio_test_writeback(folio))
+ return false;
+
+ bch2_clear_folio_bits(folio);
+ return true;
+}
+
+/* fseek: */
+
+static int folio_data_offset(struct folio *folio, loff_t pos,
+ unsigned min_replicas)
+{
+ struct bch_folio *s = bch2_folio(folio);
+ unsigned i, sectors = folio_sectors(folio);
+
+ if (s)
+ for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
+ if (s->s[i].state >= SECTOR_dirty &&
+ s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
+ return i << SECTOR_SHIFT;
+
+ return -1;
+}
+
+loff_t bch2_seek_pagecache_data(struct inode *vinode,
+ loff_t start_offset,
+ loff_t end_offset,
+ unsigned min_replicas,
+ bool nonblock)
+{
+ struct folio_batch fbatch;
+ pgoff_t start_index = start_offset >> PAGE_SHIFT;
+ pgoff_t end_index = end_offset >> PAGE_SHIFT;
+ pgoff_t index = start_index;
+ unsigned i;
+ loff_t ret;
+ int offset;
+
+ folio_batch_init(&fbatch);
+
+ while (filemap_get_folios(vinode->i_mapping,
+ &index, end_index, &fbatch)) {
+ for (i = 0; i < folio_batch_count(&fbatch); i++) {
+ struct folio *folio = fbatch.folios[i];
+
+ if (!nonblock) {
+ folio_lock(folio);
+ } else if (!folio_trylock(folio)) {
+ folio_batch_release(&fbatch);
+ return -EAGAIN;
+ }
+
+ offset = folio_data_offset(folio,
+ max(folio_pos(folio), start_offset),
+ min_replicas);
+ if (offset >= 0) {
+ ret = clamp(folio_pos(folio) + offset,
+ start_offset, end_offset);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
+ return ret;
+ }
+ folio_unlock(folio);
+ }
+ folio_batch_release(&fbatch);
+ cond_resched();
+ }
+
+ return end_offset;
+}
+
+/*
+ * Search for a hole in a folio.
+ *
+ * The filemap layer returns -ENOENT if no folio exists, so reuse the same error
+ * code to indicate a pagecache hole exists at the returned offset. Otherwise
+ * return 0 if the folio is filled with data, or an error code. This function
+ * can return -EAGAIN if nonblock is specified.
+ */
+static int folio_hole_offset(struct address_space *mapping, loff_t *offset,
+ unsigned min_replicas, bool nonblock)
+{
+ struct folio *folio;
+ struct bch_folio *s;
+ unsigned i, sectors;
+ int ret = -ENOENT;
+
+ folio = __filemap_get_folio(mapping, *offset >> PAGE_SHIFT,
+ FGP_LOCK|(nonblock ? FGP_NOWAIT : 0), 0);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ s = bch2_folio(folio);
+ if (!s)
+ goto unlock;
+
+ sectors = folio_sectors(folio);
+ for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
+ if (s->s[i].state < SECTOR_dirty ||
+ s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
+ *offset = max(*offset,
+ folio_pos(folio) + (i << SECTOR_SHIFT));
+ goto unlock;
+ }
+
+ *offset = folio_end_pos(folio);
+ ret = 0;
+unlock:
+ folio_unlock(folio);
+ folio_put(folio);
+ return ret;
+}
+
+loff_t bch2_seek_pagecache_hole(struct inode *vinode,
+ loff_t start_offset,
+ loff_t end_offset,
+ unsigned min_replicas,
+ bool nonblock)
+{
+ struct address_space *mapping = vinode->i_mapping;
+ loff_t offset = start_offset;
+ loff_t ret = 0;
+
+ while (!ret && offset < end_offset)
+ ret = folio_hole_offset(mapping, &offset, min_replicas, nonblock);
+
+ if (ret && ret != -ENOENT)
+ return ret;
+ return min(offset, end_offset);
+}
+
+int bch2_clamp_data_hole(struct inode *inode,
+ u64 *hole_start,
+ u64 *hole_end,
+ unsigned min_replicas,
+ bool nonblock)
+{
+ loff_t ret;
+
+ ret = bch2_seek_pagecache_hole(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
+ if (ret < 0)
+ return ret;
+
+ *hole_start = ret;
+
+ if (*hole_start == *hole_end)
+ return 0;
+
+ ret = bch2_seek_pagecache_data(inode,
+ *hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
+ if (ret < 0)
+ return ret;
+
+ *hole_end = ret;
+ return 0;
+}
+
+#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io-pagecache.h b/fs/bcachefs/fs-io-pagecache.h
new file mode 100644
index 000000000000..a2222ad586e9
--- /dev/null
+++ b/fs/bcachefs/fs-io-pagecache.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_IO_PAGECACHE_H
+#define _BCACHEFS_FS_IO_PAGECACHE_H
+
+#include <linux/pagemap.h>
+
+typedef DARRAY(struct folio *) folios;
+
+int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
+ u64, int, gfp_t, folios *);
+int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t);
+
+/*
+ * Use u64 for the end pos and sector helpers because if the folio covers the
+ * max supported range of the mapping, the start offset of the next folio
+ * overflows loff_t. This breaks much of the range based processing in the
+ * buffered write path.
+ */
+static inline u64 folio_end_pos(struct folio *folio)
+{
+ return folio_pos(folio) + folio_size(folio);
+}
+
+static inline size_t folio_sectors(struct folio *folio)
+{
+ return PAGE_SECTORS << folio_order(folio);
+}
+
+static inline loff_t folio_sector(struct folio *folio)
+{
+ return folio_pos(folio) >> 9;
+}
+
+static inline u64 folio_end_sector(struct folio *folio)
+{
+ return folio_end_pos(folio) >> 9;
+}
+
+#define BCH_FOLIO_SECTOR_STATE() \
+ x(unallocated) \
+ x(reserved) \
+ x(dirty) \
+ x(dirty_reserved) \
+ x(allocated)
+
+enum bch_folio_sector_state {
+#define x(n) SECTOR_##n,
+ BCH_FOLIO_SECTOR_STATE()
+#undef x
+};
+
+struct bch_folio_sector {
+ /* Uncompressed, fully allocated replicas (or on disk reservation): */
+ unsigned nr_replicas:4;
+
+	/* In-memory disk reservation this sector owns, counted in replicas: */
+ unsigned replicas_reserved:4;
+
+	/* Sector state, which also drives i_sectors accounting: */
+ enum bch_folio_sector_state state:8;
+};
+
+struct bch_folio {
+ spinlock_t lock;
+ atomic_t write_count;
+ /*
+ * Is the sector state up to date with the btree?
+ * (Not the data itself)
+ */
+ bool uptodate;
+ struct bch_folio_sector s[];
+};
+
+/* Helper for when we need to add debug instrumentation: */
+static inline void bch2_folio_sector_set(struct folio *folio,
+ struct bch_folio *s,
+ unsigned i, unsigned n)
+{
+ s->s[i].state = n;
+}
+
+/* Convert a file offset within @folio to a bch_folio_sector index: */
+static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
+{
+ u64 f_offset = pos - folio_pos(folio);
+
+ BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
+ return f_offset >> SECTOR_SHIFT;
+}
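+
+/*
+ * Example (illustrative): for an order-1 folio (8192 bytes) at file position
+ * 8192, pos 9000 gives f_offset 808, and 808 >> SECTOR_SHIFT == 1: the second
+ * 512-byte sector of the folio.
+ */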
+
+/* variant without the locked-folio assertion, for folios not yet visible: */
+static inline void __bch2_folio_release(struct folio *folio)
+{
+ kfree(folio_detach_private(folio));
+}
+
+static inline void bch2_folio_release(struct folio *folio)
+{
+ EBUG_ON(!folio_test_locked(folio));
+ __bch2_folio_release(folio);
+}
+
+static inline struct bch_folio *__bch2_folio(struct folio *folio)
+{
+ return folio_has_private(folio)
+ ? (struct bch_folio *) folio_get_private(folio)
+ : NULL;
+}
+
+static inline struct bch_folio *bch2_folio(struct folio *folio)
+{
+ EBUG_ON(!folio_test_locked(folio));
+
+ return __bch2_folio(folio);
+}
+
+struct bch_folio *__bch2_folio_create(struct folio *, gfp_t);
+struct bch_folio *bch2_folio_create(struct folio *, gfp_t);
+
+struct bch2_folio_reservation {
+ struct disk_reservation disk;
+ struct quota_res quota;
+};
+
+static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
+{
+ /* XXX: this should not be open coded */
+ return inode->ei_inode.bi_data_replicas
+ ? inode->ei_inode.bi_data_replicas - 1
+ : c->opts.data_replicas;
+}
+
+static inline void bch2_folio_reservation_init(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct bch2_folio_reservation *res)
+{
+ memset(res, 0, sizeof(*res));
+
+ res->disk.nr_replicas = inode_nr_replicas(c, inode);
+}
+
+int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
+void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
+
+void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
+void bch2_mark_pagecache_reserved(struct bch_inode_info *, u64, u64);
+
+int bch2_get_folio_disk_reservation(struct bch_fs *,
+ struct bch_inode_info *,
+ struct folio *, bool);
+
+void bch2_folio_reservation_put(struct bch_fs *,
+ struct bch_inode_info *,
+ struct bch2_folio_reservation *);
+int bch2_folio_reservation_get(struct bch_fs *,
+ struct bch_inode_info *,
+ struct folio *,
+ struct bch2_folio_reservation *,
+ unsigned, unsigned);
+
+void bch2_set_folio_dirty(struct bch_fs *,
+ struct bch_inode_info *,
+ struct folio *,
+ struct bch2_folio_reservation *,
+ unsigned, unsigned);
+
+vm_fault_t bch2_page_fault(struct vm_fault *);
+vm_fault_t bch2_page_mkwrite(struct vm_fault *);
+void bch2_invalidate_folio(struct folio *, size_t, size_t);
+bool bch2_release_folio(struct folio *, gfp_t);
+
+loff_t bch2_seek_pagecache_data(struct inode *, loff_t, loff_t, unsigned, bool);
+loff_t bch2_seek_pagecache_hole(struct inode *, loff_t, loff_t, unsigned, bool);
+int bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned, bool);
+
+#endif /* _BCACHEFS_FS_IO_PAGECACHE_H */
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
new file mode 100644
index 000000000000..b0e8144ec550
--- /dev/null
+++ b/fs/bcachefs/fs-io.c
@@ -0,0 +1,1072 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "clock.h"
+#include "error.h"
+#include "extents.h"
+#include "extent_update.h"
+#include "fs.h"
+#include "fs-io.h"
+#include "fs-io-buffered.h"
+#include "fs-io-pagecache.h"
+#include "fsck.h"
+#include "inode.h"
+#include "journal.h"
+#include "io_misc.h"
+#include "keylist.h"
+#include "quota.h"
+#include "reflink.h"
+#include "trace.h"
+
+#include <linux/aio.h>
+#include <linux/backing-dev.h>
+#include <linux/falloc.h>
+#include <linux/migrate.h>
+#include <linux/mmu_context.h>
+#include <linux/pagevec.h>
+#include <linux/rmap.h>
+#include <linux/sched/signal.h>
+#include <linux/task_io_accounting_ops.h>
+#include <linux/uio.h>
+
+#include <trace/events/writeback.h>
+
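+/*
+ * fsync on an inode with nocow writes must also flush the volatile write
+ * caches of the devices that were written to: ei_devs_need_flush tracks
+ * those devices, and we issue one REQ_OP_FLUSH bio per device:
+ */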
+struct nocow_flush {
+ struct closure *cl;
+ struct bch_dev *ca;
+ struct bio bio;
+};
+
+static void nocow_flush_endio(struct bio *_bio)
+{
+ struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
+
+ closure_put(bio->cl);
+ percpu_ref_put(&bio->ca->io_ref);
+ bio_put(&bio->bio);
+}
+
+void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct closure *cl)
+{
+ struct nocow_flush *bio;
+ struct bch_dev *ca;
+ struct bch_devs_mask devs;
+ unsigned dev;
+
+ dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
+ if (dev == BCH_SB_MEMBERS_MAX)
+ return;
+
+ devs = inode->ei_devs_need_flush;
+ memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
+
+ for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
+
+ bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
+ REQ_OP_FLUSH,
+ GFP_KERNEL,
+ &c->nocow_flush_bioset),
+ struct nocow_flush, bio);
+ bio->cl = cl;
+ bio->ca = ca;
+ bio->bio.bi_end_io = nocow_flush_endio;
+ closure_bio_submit(&bio->bio, cl);
+ }
+}
+
+static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct closure cl;
+
+ closure_init_stack(&cl);
+ bch2_inode_flush_nocow_writes_async(c, inode, &cl);
+ closure_sync(&cl);
+
+ return 0;
+}
+
+/* i_size updates: */
+
+struct inode_new_size {
+ loff_t new_size;
+ u64 now;
+ unsigned fields;
+};
+
+static int inode_set_size(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
+{
+ struct inode_new_size *s = p;
+
+ bi->bi_size = s->new_size;
+ if (s->fields & ATTR_ATIME)
+ bi->bi_atime = s->now;
+ if (s->fields & ATTR_MTIME)
+ bi->bi_mtime = s->now;
+ if (s->fields & ATTR_CTIME)
+ bi->bi_ctime = s->now;
+
+ return 0;
+}
+
+int __must_check bch2_write_inode_size(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ loff_t new_size, unsigned fields)
+{
+ struct inode_new_size s = {
+ .new_size = new_size,
+ .now = bch2_current_time(c),
+ .fields = fields,
+ };
+
+ return bch2_write_inode(c, inode, inode_set_size, &s, fields);
+}
+
+void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
+{
+ bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,
+ "inode %lu i_blocks underflow: %llu + %lli < 0 (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks, sectors,
+ inode->ei_inode.bi_sectors);
+ inode->v.i_blocks += sectors;
+
+#ifdef CONFIG_BCACHEFS_QUOTA
+ if (quota_res &&
+ !test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags) &&
+ sectors > 0) {
+ BUG_ON(sectors > quota_res->sectors);
+ BUG_ON(sectors > inode->ei_quota_reserved);
+
+ quota_res->sectors -= sectors;
+ inode->ei_quota_reserved -= sectors;
+ } else {
+ bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
+ }
+#endif
+}
+
+/* fsync: */
+
+/*
+ * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
+ * insert trigger: look up the btree inode instead
+ */
+static int bch2_flush_inode(struct bch_fs *c,
+ struct bch_inode_info *inode)
+{
+ struct bch_inode_unpacked u;
+ int ret;
+
+ if (c->opts.journal_flush_disabled)
+ return 0;
+
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
+ if (ret)
+ return ret;
+
+ return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
+}
+
+int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ int ret, ret2, ret3;
+
+ ret = file_write_and_wait_range(file, start, end);
+ ret2 = sync_inode_metadata(&inode->v, 1);
+ ret3 = bch2_flush_inode(c, inode);
+
+ return bch2_err_class(ret ?: ret2 ?: ret3);
+}
+
+/* truncate: */
+
+static inline int range_has_data(struct bch_fs *c, u32 subvol,
+ struct bpos start,
+ struct bpos end)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, subvol, &start.snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
+ if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) {
+ ret = 1;
+ break;
+ }
+ start = iter.pos;
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int __bch2_truncate_folio(struct bch_inode_info *inode,
+ pgoff_t index, loff_t start, loff_t end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct address_space *mapping = inode->v.i_mapping;
+ struct bch_folio *s;
+ unsigned start_offset;
+ unsigned end_offset;
+ unsigned i;
+ struct folio *folio;
+ s64 i_sectors_delta = 0;
+ int ret = 0;
+ u64 end_pos;
+
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR_OR_NULL(folio)) {
+ /*
+ * XXX: we're doing two index lookups when we end up reading the
+ * folio
+ */
+ ret = range_has_data(c, inode->ei_subvol,
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
+ POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
+ if (ret <= 0)
+ return ret;
+
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK|FGP_CREAT, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(folio)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ BUG_ON(start >= folio_end_pos(folio));
+ BUG_ON(end <= folio_pos(folio));
+
+ start_offset = max(start, folio_pos(folio)) - folio_pos(folio);
+ end_offset = min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
+
+ /* Folio boundary? Nothing to do */
+ if (start_offset == 0 &&
+ end_offset == folio_size(folio)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ s = bch2_folio_create(folio, 0);
+ if (!s) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ if (!folio_test_uptodate(folio)) {
+ ret = bch2_read_single_folio(folio, mapping);
+ if (ret)
+ goto unlock;
+ }
+
+ ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
+ if (ret)
+ goto unlock;
+
+ for (i = round_up(start_offset, block_bytes(c)) >> 9;
+ i < round_down(end_offset, block_bytes(c)) >> 9;
+ i++) {
+ s->s[i].nr_replicas = 0;
+
+ i_sectors_delta -= s->s[i].state == SECTOR_dirty;
+ bch2_folio_sector_set(folio, s, i, SECTOR_unallocated);
+ }
+
+ bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
+ /*
+ * Caller needs to know whether this folio will be written out by
+ * writeback - doing an i_size update if necessary - or whether it will
+ * be responsible for the i_size update.
+ *
+ * Note that we shouldn't ever see a folio beyond EOF, but check and
+ * warn if so. This has been observed when folios weren't cleaned up
+ * after a short write, and there's still a chance reclaim will fix
+ * things up.
+ */
+ WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
+ end_pos = folio_end_pos(folio);
+ if (inode->v.i_size > folio_pos(folio))
+ end_pos = min_t(u64, inode->v.i_size, end_pos);
+ ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
+
+ folio_zero_segment(folio, start_offset, end_offset);
+
+ /*
+ * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
+ *
+ * XXX: because we aren't currently tracking whether the folio has actual
+ * data in it (vs. just 0s, or only partially written) this is wrong. Ick.
+ */
+ BUG_ON(bch2_get_folio_disk_reservation(c, inode, folio, false));
+
+ /*
+ * This removes any writeable userspace mappings; we need to force
+ * .page_mkwrite to be called again before any mmapped writes, to
+ * redirty the full page:
+ */
+ folio_mkclean(folio);
+ filemap_dirty_folio(mapping, folio);
+unlock:
+ folio_unlock(folio);
+ folio_put(folio);
+out:
+ return ret;
+}
+
+static int bch2_truncate_folio(struct bch_inode_info *inode, loff_t from)
+{
+ return __bch2_truncate_folio(inode, from >> PAGE_SHIFT,
+ from, ANYSINT_MAX(loff_t));
+}
+
+static int bch2_truncate_folios(struct bch_inode_info *inode,
+ loff_t start, loff_t end)
+{
+ int ret = __bch2_truncate_folio(inode, start >> PAGE_SHIFT,
+ start, end);
+
+ if (ret >= 0 &&
+ start >> PAGE_SHIFT != end >> PAGE_SHIFT)
+ ret = __bch2_truncate_folio(inode,
+ (end - 1) >> PAGE_SHIFT,
+ start, end);
+ return ret;
+}
+
+static int bch2_extend(struct mnt_idmap *idmap,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *inode_u,
+ struct iattr *iattr)
+{
+ struct address_space *mapping = inode->v.i_mapping;
+ int ret;
+
+ /*
+ * sync appends:
+ *
+ * this has to be done _before_ extending i_size:
+ */
+ ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
+ if (ret)
+ return ret;
+
+ truncate_setsize(&inode->v, iattr->ia_size);
+
+ return bch2_setattr_nonsize(idmap, inode, iattr);
+}
+
+int bchfs_truncate(struct mnt_idmap *idmap,
+ struct bch_inode_info *inode, struct iattr *iattr)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct address_space *mapping = inode->v.i_mapping;
+ struct bch_inode_unpacked inode_u;
+ s64 i_sectors_delta = 0;
+ int ret = 0;
+
+ /*
+ * If the truncate call will change the size of the file, the
+ * cmtimes should be updated. If the size will not change, we
+ * do not need to update the cmtimes.
+ */
+ if (iattr->ia_size != inode->v.i_size) {
+ if (!(iattr->ia_valid & ATTR_MTIME))
+ ktime_get_coarse_real_ts64(&iattr->ia_mtime);
+ if (!(iattr->ia_valid & ATTR_CTIME))
+ ktime_get_coarse_real_ts64(&iattr->ia_ctime);
+ iattr->ia_valid |= ATTR_MTIME|ATTR_CTIME;
+ }
+
+ inode_dio_wait(&inode->v);
+ bch2_pagecache_block_get(inode);
+
+ ret = bch2_inode_find_by_inum(c, inode_inum(inode), &inode_u);
+ if (ret)
+ goto err;
+
+ /*
+ * check this before next assertion; on filesystem error our normal
+ * invariants are a bit broken (truncate has to truncate the page cache
+ * before the inode).
+ */
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ goto err;
+
+ WARN_ONCE(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
+ inode->v.i_size < inode_u.bi_size,
+ "truncate spotted in mem i_size < btree i_size: %llu < %llu\n",
+ (u64) inode->v.i_size, inode_u.bi_size);
+
+ if (iattr->ia_size > inode->v.i_size) {
+ ret = bch2_extend(idmap, inode, &inode_u, iattr);
+ goto err;
+ }
+
+ iattr->ia_valid &= ~ATTR_SIZE;
+
+ ret = bch2_truncate_folio(inode, iattr->ia_size);
+ if (unlikely(ret < 0))
+ goto err;
+
+ truncate_setsize(&inode->v, iattr->ia_size);
+
+ /*
+ * When extending, we're going to write the new i_size to disk
+ * immediately so we need to flush anything above the current on disk
+ * i_size first:
+ *
+ * Also, when extending we need to flush the page that i_size currently
+ * straddles - if it's mapped to userspace, we need to ensure that
+ * userspace has to redirty it and call .mkwrite -> set_page_dirty
+ * again to allocate the part of the page that was extended.
+ */
+ if (iattr->ia_size > inode_u.bi_size)
+ ret = filemap_write_and_wait_range(mapping,
+ inode_u.bi_size,
+ iattr->ia_size - 1);
+ else if (iattr->ia_size & (PAGE_SIZE - 1))
+ ret = filemap_write_and_wait_range(mapping,
+ round_down(iattr->ia_size, PAGE_SIZE),
+ iattr->ia_size - 1);
+ if (ret)
+ goto err;
+
+ ret = bch2_truncate(c, inode_inum(inode), iattr->ia_size, &i_sectors_delta);
+ bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
+ if (unlikely(ret)) {
+ /*
+ * If we error here, VFS caches are now inconsistent with btree
+ */
+ set_bit(EI_INODE_ERROR, &inode->ei_flags);
+ goto err;
+ }
+
+ bch2_fs_inconsistent_on(!inode->v.i_size && inode->v.i_blocks &&
+ !bch2_journal_error(&c->journal), c,
+ "inode %lu truncated to 0 but i_blocks %llu (ondisk %lli)",
+ inode->v.i_ino, (u64) inode->v.i_blocks,
+ inode->ei_inode.bi_sectors);
+
+ ret = bch2_setattr_nonsize(idmap, inode, iattr);
+err:
+ bch2_pagecache_block_put(inode);
+ return bch2_err_class(ret);
+}
+
+/* fallocate: */
+
+static int inode_update_times_fn(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi, void *p)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+ bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
+ return 0;
+}
+
+static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ u64 end = offset + len;
+ u64 block_start = round_up(offset, block_bytes(c));
+ u64 block_end = round_down(end, block_bytes(c));
+ bool truncated_last_page;
+ int ret = 0;
+
+ ret = bch2_truncate_folios(inode, offset, end);
+ if (unlikely(ret < 0))
+ goto err;
+
+ truncated_last_page = ret;
+
+ truncate_pagecache_range(&inode->v, offset, end - 1);
+
+ if (block_start < block_end) {
+ s64 i_sectors_delta = 0;
+
+ ret = bch2_fpunch(c, inode_inum(inode),
+ block_start >> 9, block_end >> 9,
+ &i_sectors_delta);
+ bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+ }
+
+ mutex_lock(&inode->ei_update_lock);
+ if (end >= inode->v.i_size && !truncated_last_page) {
+ ret = bch2_write_inode_size(c, inode, inode->v.i_size,
+ ATTR_MTIME|ATTR_CTIME);
+ } else {
+ ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+ ATTR_MTIME|ATTR_CTIME);
+ }
+ mutex_unlock(&inode->ei_update_lock);
+err:
+ return ret;
+}
+
+static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
+ loff_t offset, loff_t len,
+ bool insert)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct address_space *mapping = inode->v.i_mapping;
+ s64 i_sectors_delta = 0;
+ int ret = 0;
+
+ if ((offset | len) & (block_bytes(c) - 1))
+ return -EINVAL;
+
+ if (insert) {
+ if (offset >= inode->v.i_size)
+ return -EINVAL;
+ } else {
+ if (offset + len >= inode->v.i_size)
+ return -EINVAL;
+ }
+
+ ret = bch2_write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
+ if (ret)
+ return ret;
+
+ if (insert)
+ i_size_write(&inode->v, inode->v.i_size + len);
+
+ ret = bch2_fcollapse_finsert(c, inode_inum(inode), offset >> 9, len >> 9,
+ insert, &i_sectors_delta);
+ if (!ret && !insert)
+ i_size_write(&inode->v, inode->v.i_size - len);
+ bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
+
+ return ret;
+}
+
+static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ u64 start_sector, u64 end_sector)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bpos end_pos = POS(inode->v.i_ino, end_sector);
+ struct bch_io_opts opts;
+ int ret = 0;
+
+ bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ POS(inode->v.i_ino, start_sector),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+ while (!ret && bkey_lt(iter.pos, end_pos)) {
+ s64 i_sectors_delta = 0;
+ struct quota_res quota_res = { 0 };
+ struct bkey_s_c k;
+ unsigned sectors;
+ bool is_allocation;
+ u64 hole_start, hole_end;
+ u32 snapshot;
+
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans,
+ inode->ei_subvol, &snapshot);
+ if (ret)
+ goto bkey_err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ if ((ret = bkey_err(k)))
+ goto bkey_err;
+
+ hole_start = iter.pos.offset;
+ hole_end = bpos_min(k.k->p, end_pos).offset;
+ is_allocation = bkey_extent_is_allocation(k.k);
+
+ /* already reserved */
+ if (bkey_extent_is_reservation(k) &&
+ bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
+ bch2_btree_iter_advance(&iter);
+ continue;
+ }
+
+ if (bkey_extent_is_data(k.k) &&
+ !(mode & FALLOC_FL_ZERO_RANGE)) {
+ bch2_btree_iter_advance(&iter);
+ continue;
+ }
+
+ if (!(mode & FALLOC_FL_ZERO_RANGE)) {
+ /*
+ * Lock ordering - can't be holding btree locks while
+ * blocking on a folio lock:
+ */
+ if (bch2_clamp_data_hole(&inode->v,
+ &hole_start,
+ &hole_end,
+ opts.data_replicas, true))
+ ret = drop_locks_do(trans,
+ (bch2_clamp_data_hole(&inode->v,
+ &hole_start,
+ &hole_end,
+ opts.data_replicas, false), 0));
+ bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+
+ if (ret)
+ goto bkey_err;
+
+ if (hole_start == hole_end)
+ continue;
+ }
+
+ sectors = hole_end - hole_start;
+
+ if (!is_allocation) {
+ ret = bch2_quota_reservation_add(c, inode,
+ &quota_res, sectors, true);
+ if (unlikely(ret))
+ goto bkey_err;
+ }
+
+ ret = bch2_extent_fallocate(trans, inode_inum(inode), &iter,
+ sectors, opts, &i_sectors_delta,
+ writepoint_hashed((unsigned long) current));
+ if (ret)
+ goto bkey_err;
+
+ bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
+
+ drop_locks_do(trans,
+ (bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0));
+bkey_err:
+ bch2_quota_reservation_put(c, inode, &quota_res);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+ }
+
+ if (bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)) {
+ struct quota_res quota_res = { 0 };
+ s64 i_sectors_delta = 0;
+
+ bch2_fpunch_at(trans, &iter, inode_inum(inode),
+ end_sector, &i_sectors_delta);
+ bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
+ bch2_quota_reservation_put(c, inode, &quota_res);
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
+ loff_t offset, loff_t len)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ u64 end = offset + len;
+ u64 block_start = round_down(offset, block_bytes(c));
+ u64 block_end = round_up(end, block_bytes(c));
+ bool truncated_last_page = false;
+ int ret, ret2 = 0;
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
+ ret = inode_newsize_ok(&inode->v, end);
+ if (ret)
+ return ret;
+ }
+
+ if (mode & FALLOC_FL_ZERO_RANGE) {
+ ret = bch2_truncate_folios(inode, offset, end);
+ if (unlikely(ret < 0))
+ return ret;
+
+ truncated_last_page = ret;
+
+ truncate_pagecache_range(&inode->v, offset, end - 1);
+
+ block_start = round_up(offset, block_bytes(c));
+ block_end = round_down(end, block_bytes(c));
+ }
+
+ ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);
+
+ /*
+ * On -ENOSPC in ZERO_RANGE mode, we still want to do the inode update,
+ * so that the VFS cache i_size is consistent with the btree i_size:
+ */
+ if (ret &&
+ !(bch2_err_matches(ret, ENOSPC) && (mode & FALLOC_FL_ZERO_RANGE)))
+ return ret;
+
+ if (mode & FALLOC_FL_KEEP_SIZE && end > inode->v.i_size)
+ end = inode->v.i_size;
+
+ if (end >= inode->v.i_size &&
+ (((mode & FALLOC_FL_ZERO_RANGE) && !truncated_last_page) ||
+ !(mode & FALLOC_FL_KEEP_SIZE))) {
+ spin_lock(&inode->v.i_lock);
+ i_size_write(&inode->v, end);
+ spin_unlock(&inode->v.i_lock);
+
+ mutex_lock(&inode->ei_update_lock);
+ ret2 = bch2_write_inode_size(c, inode, end, 0);
+ mutex_unlock(&inode->ei_update_lock);
+ }
+
+ return ret ?: ret2;
+}
+
+long bch2_fallocate_dispatch(struct file *file, int mode,
+ loff_t offset, loff_t len)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ long ret;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fallocate))
+ return -EROFS;
+
+ inode_lock(&inode->v);
+ inode_dio_wait(&inode->v);
+ bch2_pagecache_block_get(inode);
+
+ ret = file_modified(file);
+ if (ret)
+ goto err;
+
+ if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
+ ret = bchfs_fallocate(inode, mode, offset, len);
+ else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
+ ret = bchfs_fpunch(inode, offset, len);
+ else if (mode == FALLOC_FL_INSERT_RANGE)
+ ret = bchfs_fcollapse_finsert(inode, offset, len, true);
+ else if (mode == FALLOC_FL_COLLAPSE_RANGE)
+ ret = bchfs_fcollapse_finsert(inode, offset, len, false);
+ else
+ ret = -EOPNOTSUPP;
+err:
+ bch2_pagecache_block_put(inode);
+ inode_unlock(&inode->v);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
+
+ return bch2_err_class(ret);
+}
+
+/*
+ * Take a quota reservation for unallocated blocks in a given file range.
+ * Does not check the pagecache.
+ */
+static int quota_reserve_range(struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 start, u64 end)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 snapshot;
+ u64 sectors = end - start;
+ u64 pos = start;
+ int ret;
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, pos, snapshot), 0);
+
+ while (!(ret = btree_trans_too_many_iters(trans)) &&
+ (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k &&
+ !(ret = bkey_err(k))) {
+ if (bkey_extent_is_allocation(k.k)) {
+ u64 s = min(end, k.k->p.offset) -
+ max(start, bkey_start_offset(k.k));
+ BUG_ON(s > sectors);
+ sectors -= s;
+ }
+ bch2_btree_iter_advance(&iter);
+ }
+ pos = iter.pos.offset;
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+
+ return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true);
+}
+
+loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
+ struct file *file_dst, loff_t pos_dst,
+ loff_t len, unsigned remap_flags)
+{
+ struct bch_inode_info *src = file_bch_inode(file_src);
+ struct bch_inode_info *dst = file_bch_inode(file_dst);
+ struct bch_fs *c = src->v.i_sb->s_fs_info;
+ struct quota_res quota_res = { 0 };
+ s64 i_sectors_delta = 0;
+ u64 aligned_len;
+ loff_t ret = 0;
+
+ if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
+ return -EINVAL;
+
+ if (remap_flags & REMAP_FILE_DEDUP)
+ return -EOPNOTSUPP;
+
+ if ((pos_src & (block_bytes(c) - 1)) ||
+ (pos_dst & (block_bytes(c) - 1)))
+ return -EINVAL;
+
+ if (src == dst &&
+ abs(pos_src - pos_dst) < len)
+ return -EINVAL;
+
+ bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
+
+ inode_dio_wait(&src->v);
+ inode_dio_wait(&dst->v);
+
+ ret = generic_remap_file_range_prep(file_src, pos_src,
+ file_dst, pos_dst,
+ &len, remap_flags);
+ if (ret < 0 || len == 0)
+ goto err;
+
+ aligned_len = round_up((u64) len, block_bytes(c));
+
+ ret = bch2_write_invalidate_inode_pages_range(dst->v.i_mapping,
+ pos_dst, pos_dst + len - 1);
+ if (ret)
+ goto err;
+
+ ret = quota_reserve_range(dst, &quota_res, pos_dst >> 9,
+ (pos_dst + aligned_len) >> 9);
+ if (ret)
+ goto err;
+
+ file_update_time(file_dst);
+
+ bch2_mark_pagecache_unallocated(src, pos_src >> 9,
+ (pos_src + aligned_len) >> 9);
+
+ ret = bch2_remap_range(c,
+ inode_inum(dst), pos_dst >> 9,
+ inode_inum(src), pos_src >> 9,
+ aligned_len >> 9,
+ pos_dst + len, &i_sectors_delta);
+ if (ret < 0)
+ goto err;
+
+ /*
+ * due to alignment, we might have remapped slightly more than requested
+ */
+ ret = min((u64) ret << 9, (u64) len);
+
+ bch2_i_sectors_acct(c, dst, &quota_res, i_sectors_delta);
+
+ spin_lock(&dst->v.i_lock);
+ if (pos_dst + ret > dst->v.i_size)
+ i_size_write(&dst->v, pos_dst + ret);
+ spin_unlock(&dst->v.i_lock);
+
+ if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
+ IS_SYNC(file_inode(file_dst)))
+ ret = bch2_flush_inode(c, dst);
+err:
+ bch2_quota_reservation_put(c, dst, &quota_res);
+ bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
+
+ return bch2_err_class(ret);
+}
+
+/* fseek: */
+
+static loff_t bch2_seek_data(struct file *file, u64 offset)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ subvol_inum inum = inode_inum(inode);
+ u64 isize, next_data = MAX_LFS_FILESIZE;
+ u32 snapshot;
+ int ret;
+
+ isize = i_size_read(&inode->v);
+ if (offset >= isize)
+ return -ENXIO;
+
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ POS(inode->v.i_ino, U64_MAX),
+ 0, k, ret) {
+ if (bkey_extent_is_data(k.k)) {
+ next_data = max(offset, bkey_start_offset(k.k) << 9);
+ break;
+ } else if (k.k->p.offset >> 9 > isize)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+ if (ret)
+ return ret;
+
+ if (next_data > offset)
+ next_data = bch2_seek_pagecache_data(&inode->v,
+ offset, next_data, 0, false);
+
+ if (next_data >= isize)
+ return -ENXIO;
+
+ return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
+}
+
+static loff_t bch2_seek_hole(struct file *file, u64 offset)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ subvol_inum inum = inode_inum(inode);
+ u64 isize, next_hole = MAX_LFS_FILESIZE;
+ u32 snapshot;
+ int ret;
+
+ isize = i_size_read(&inode->v);
+ if (offset >= isize)
+ return -ENXIO;
+
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
+ SPOS(inode->v.i_ino, offset >> 9, snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ if (k.k->p.inode != inode->v.i_ino) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
+ offset, MAX_LFS_FILESIZE, 0, false);
+ break;
+ } else if (!bkey_extent_is_data(k.k)) {
+ next_hole = bch2_seek_pagecache_hole(&inode->v,
+ max(offset, bkey_start_offset(k.k) << 9),
+ k.k->p.offset << 9, 0, false);
+
+ if (next_hole < k.k->p.offset << 9)
+ break;
+ } else {
+ offset = max(offset, bkey_start_offset(k.k) << 9);
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+ if (ret)
+ return ret;
+
+ if (next_hole > isize)
+ next_hole = isize;
+
+ return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
+}
+
+loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t ret;
+
+ switch (whence) {
+ case SEEK_SET:
+ case SEEK_CUR:
+ case SEEK_END:
+ ret = generic_file_llseek(file, offset, whence);
+ break;
+ case SEEK_DATA:
+ ret = bch2_seek_data(file, offset);
+ break;
+ case SEEK_HOLE:
+ ret = bch2_seek_hole(file, offset);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return bch2_err_class(ret);
+}
+
+void bch2_fs_fsio_exit(struct bch_fs *c)
+{
+ bioset_exit(&c->nocow_flush_bioset);
+}
+
+int bch2_fs_fsio_init(struct bch_fs *c)
+{
+ if (bioset_init(&c->nocow_flush_bioset,
+ 1, offsetof(struct nocow_flush, bio), 0))
+ return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
+
+ return 0;
+}
+
+#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-io.h b/fs/bcachefs/fs-io.h
new file mode 100644
index 000000000000..ca70346e68dc
--- /dev/null
+++ b/fs/bcachefs/fs-io.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_IO_H
+#define _BCACHEFS_FS_IO_H
+
+#ifndef NO_BCACHEFS_FS
+
+#include "buckets.h"
+#include "fs.h"
+#include "io_write_types.h"
+#include "quota.h"
+
+#include <linux/uio.h>
+
+struct folio_vec {
+ struct folio *fv_folio;
+ size_t fv_offset;
+ size_t fv_len;
+};
+
+static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
+{
+ struct folio *folio = page_folio(bv.bv_page);
+ size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
+ bv.bv_offset;
+ size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
+
+ return (struct folio_vec) {
+ .fv_folio = folio,
+ .fv_offset = offset,
+ .fv_len = len,
+ };
+}
+
+static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
+ struct bvec_iter iter)
+{
+ return biovec_to_foliovec(bio_iter_iovec(bio, iter));
+}
+
+#define __bio_for_each_folio(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
+ bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
+
+/**
+ * bio_for_each_folio - iterate over folios within a bio
+ *
+ * Like other non-_all versions, this iterates over what bio->bi_iter currently
+ * points to. This version is for drivers, where the bio may have previously
+ * been split or cloned.
+ */
+#define bio_for_each_folio(bvl, bio, iter) \
+ __bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
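+
+/*
+ * Illustrative use (hypothetical caller; flush_dcache_folio() stands in for
+ * whatever per-folio work is needed):
+ *
+ *	struct folio_vec fv;
+ *	struct bvec_iter iter;
+ *
+ *	bio_for_each_folio(fv, bio, iter)
+ *		flush_dcache_folio(fv.fv_folio);
+ */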
+
+struct quota_res {
+ u64 sectors;
+};
+
+#ifdef CONFIG_BCACHEFS_QUOTA
+
+static inline void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
+{
+ BUG_ON(res->sectors > inode->ei_quota_reserved);
+
+ bch2_quota_acct(c, inode->ei_qid, Q_SPC,
+ -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
+ inode->ei_quota_reserved -= res->sectors;
+ res->sectors = 0;
+}
+
+static inline void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res)
+{
+ if (res->sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_quota_reservation_put(c, inode, res);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+}
+
+static inline int bch2_quota_reservation_add(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res,
+ u64 sectors,
+ bool check_enospc)
+{
+ int ret;
+
+ if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
+ return 0;
+
+ mutex_lock(&inode->ei_quota_lock);
+ ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
+ check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
+ if (likely(!ret)) {
+ inode->ei_quota_reserved += sectors;
+ res->sectors += sectors;
+ }
+ mutex_unlock(&inode->ei_quota_lock);
+
+ return ret;
+}
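+
+/*
+ * Typical pattern (see __bchfs_fallocate()): bch2_quota_reservation_add()
+ * before allocating, bch2_i_sectors_acct() to debit the reservation as blocks
+ * are allocated, then bch2_quota_reservation_put() to release whatever
+ * remains.
+ */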
+
+#else
+
+static inline void __bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
+
+static inline void bch2_quota_reservation_put(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res) {}
+
+static inline int bch2_quota_reservation_add(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct quota_res *res,
+ unsigned sectors,
+ bool check_enospc)
+{
+ return 0;
+}
+
+#endif
+
+void __bch2_i_sectors_acct(struct bch_fs *, struct bch_inode_info *,
+ struct quota_res *, s64);
+
+static inline void bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
+ struct quota_res *quota_res, s64 sectors)
+{
+ if (sectors) {
+ mutex_lock(&inode->ei_quota_lock);
+ __bch2_i_sectors_acct(c, inode, quota_res, sectors);
+ mutex_unlock(&inode->ei_quota_lock);
+ }
+}
+
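+/*
+ * current->faults_disabled_mapping stores an address_space pointer with its
+ * low bit used as a tag: faults_disabled_mapping() masks the bit off to
+ * recover the pointer, and set_fdm_dropped_locks() sets it to record that
+ * locks were dropped while servicing a page fault:
+ */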
+static inline struct address_space *faults_disabled_mapping(void)
+{
+ return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
+}
+
+static inline void set_fdm_dropped_locks(void)
+{
+ current->faults_disabled_mapping =
+ (void *) (((unsigned long) current->faults_disabled_mapping)|1);
+}
+
+static inline bool fdm_dropped_locks(void)
+{
+ return ((unsigned long) current->faults_disabled_mapping) & 1;
+}
+
+void bch2_inode_flush_nocow_writes_async(struct bch_fs *,
+ struct bch_inode_info *, struct closure *);
+
+int __must_check bch2_write_inode_size(struct bch_fs *,
+ struct bch_inode_info *,
+ loff_t, unsigned);
+
+int bch2_fsync(struct file *, loff_t, loff_t, int);
+
+int bchfs_truncate(struct mnt_idmap *,
+ struct bch_inode_info *, struct iattr *);
+long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);
+
+loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
+ loff_t, loff_t, unsigned);
+
+loff_t bch2_llseek(struct file *, loff_t, int);
+
+void bch2_fs_fsio_exit(struct bch_fs *);
+int bch2_fs_fsio_init(struct bch_fs *);
+#else
+static inline void bch2_fs_fsio_exit(struct bch_fs *c) {}
+static inline int bch2_fs_fsio_init(struct bch_fs *c) { return 0; }
+#endif
+
+#endif /* _BCACHEFS_FS_IO_H */
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
new file mode 100644
index 000000000000..6040bd3f0778
--- /dev/null
+++ b/fs/bcachefs/fs-ioctl.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
+
+#include "bcachefs.h"
+#include "chardev.h"
+#include "dirent.h"
+#include "fs.h"
+#include "fs-common.h"
+#include "fs-ioctl.h"
+#include "quota.h"
+
+#include <linux/compat.h>
+#include <linux/fsnotify.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/writeback.h>
+
+#define FS_IOC_GOINGDOWN _IOR('X', 125, __u32)
+#define FSOP_GOING_FLAGS_DEFAULT 0x0 /* going down */
+#define FSOP_GOING_FLAGS_LOGFLUSH 0x1 /* flush log but not data */
+#define FSOP_GOING_FLAGS_NOLOGFLUSH 0x2 /* don't flush log nor data */
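+
+/*
+ * Same interface as XFS_IOC_GOINGDOWN; e.g. (illustrative)
+ *
+ *	__u32 flags = FSOP_GOING_FLAGS_LOGFLUSH;
+ *	ioctl(fd, FS_IOC_GOINGDOWN, &flags);
+ *
+ * flushes the journal, then takes the filesystem emergency read-only.
+ */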
+
+struct flags_set {
+ unsigned mask;
+ unsigned flags;
+
+ unsigned projid;
+
+ bool set_projinherit;
+ bool projinherit;
+};
+
+static int bch2_inode_flags_set(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ /*
+ * We're relying on btree locking here for exclusion with other ioctl
+ * calls - use the flags in the btree (@bi), not inode->i_flags:
+ */
+ struct flags_set *s = p;
+ unsigned newflags = s->flags;
+ unsigned oldflags = bi->bi_flags & s->mask;
+
+ if (((newflags ^ oldflags) & (BCH_INODE_APPEND|BCH_INODE_IMMUTABLE)) &&
+ !capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ if (!S_ISREG(bi->bi_mode) &&
+ !S_ISDIR(bi->bi_mode) &&
+ (newflags & (BCH_INODE_NODUMP|BCH_INODE_NOATIME)) != newflags)
+ return -EINVAL;
+
+ if (s->set_projinherit) {
+ bi->bi_fields_set &= ~(1 << Inode_opt_project);
+ bi->bi_fields_set |= ((int) s->projinherit << Inode_opt_project);
+ }
+
+ bi->bi_flags &= ~s->mask;
+ bi->bi_flags |= newflags;
+
+ bi->bi_ctime = timespec_to_bch2_time(c, current_time(&inode->v));
+ return 0;
+}
+
+static int bch2_ioc_getflags(struct bch_inode_info *inode, int __user *arg)
+{
+ unsigned flags = map_flags(bch_flags_to_uflags, inode->ei_inode.bi_flags);
+
+ return put_user(flags, arg);
+}
+
+static int bch2_ioc_setflags(struct bch_fs *c,
+ struct file *file,
+ struct bch_inode_info *inode,
+ void __user *arg)
+{
+ struct flags_set s = { .mask = map_defined(bch_flags_to_uflags) };
+ unsigned uflags;
+ int ret;
+
+ if (get_user(uflags, (int __user *) arg))
+ return -EFAULT;
+
+ s.flags = map_flags_rev(bch_flags_to_uflags, uflags);
+ if (uflags)
+ return -EOPNOTSUPP;
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ inode_lock(&inode->v);
+ if (!inode_owner_or_capable(file_mnt_idmap(file), &inode->v)) {
+ ret = -EACCES;
+ goto setflags_out;
+ }
+
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_write_inode(c, inode, bch2_inode_flags_set, &s,
+ ATTR_CTIME);
+ mutex_unlock(&inode->ei_update_lock);
+
+setflags_out:
+ inode_unlock(&inode->v);
+ mnt_drop_write_file(file);
+ return ret;
+}
+
+static int bch2_ioc_fsgetxattr(struct bch_inode_info *inode,
+ struct fsxattr __user *arg)
+{
+ struct fsxattr fa = { 0 };
+
+ fa.fsx_xflags = map_flags(bch_flags_to_xflags, inode->ei_inode.bi_flags);
+
+ if (inode->ei_inode.bi_fields_set & (1 << Inode_opt_project))
+ fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;
+
+ fa.fsx_projid = inode->ei_qid.q[QTYP_PRJ];
+
+ if (copy_to_user(arg, &fa, sizeof(fa)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int fssetxattr_inode_update_fn(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
+{
+ struct flags_set *s = p;
+
+ if (s->projid != bi->bi_project) {
+ bi->bi_fields_set |= 1U << Inode_opt_project;
+ bi->bi_project = s->projid;
+ }
+
+ return bch2_inode_flags_set(trans, inode, bi, p);
+}
+
+static int bch2_ioc_fssetxattr(struct bch_fs *c,
+ struct file *file,
+ struct bch_inode_info *inode,
+ struct fsxattr __user *arg)
+{
+ struct flags_set s = { .mask = map_defined(bch_flags_to_xflags) };
+ struct fsxattr fa;
+ int ret;
+
+ if (copy_from_user(&fa, arg, sizeof(fa)))
+ return -EFAULT;
+
+ s.set_projinherit = true;
+ s.projinherit = (fa.fsx_xflags & FS_XFLAG_PROJINHERIT) != 0;
+ fa.fsx_xflags &= ~FS_XFLAG_PROJINHERIT;
+
+ s.flags = map_flags_rev(bch_flags_to_xflags, fa.fsx_xflags);
+ if (fa.fsx_xflags)
+ return -EOPNOTSUPP;
+
+ if (fa.fsx_projid >= U32_MAX)
+ return -EINVAL;
+
+ /*
+ * inode fields accessible via the xattr interface are stored with a +1
+ * bias, so that 0 means unset:
+ */
+ s.projid = fa.fsx_projid + 1;
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ inode_lock(&inode->v);
+ if (!inode_owner_or_capable(file_mnt_idmap(file), &inode->v)) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_set_projid(c, inode, fa.fsx_projid);
+ if (ret)
+ goto err_unlock;
+
+ ret = bch2_write_inode(c, inode, fssetxattr_inode_update_fn, &s,
+ ATTR_CTIME);
+err_unlock:
+ mutex_unlock(&inode->ei_update_lock);
+err:
+ inode_unlock(&inode->v);
+ mnt_drop_write_file(file);
+ return ret;
+}
+
+static int bch2_reinherit_attrs_fn(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
+{
+ struct bch_inode_info *dir = p;
+
+ return !bch2_reinherit_attrs(bi, &dir->ei_inode);
+}
+
+static int bch2_ioc_reinherit_attrs(struct bch_fs *c,
+ struct file *file,
+ struct bch_inode_info *src,
+ const char __user *name)
+{
+ struct bch_hash_info hash = bch2_hash_info_init(c, &src->ei_inode);
+ struct bch_inode_info *dst;
+ struct inode *vinode = NULL;
+ char *kname = NULL;
+ struct qstr qstr;
+ int ret = 0;
+ subvol_inum inum;
+
+ kname = kmalloc(BCH_NAME_MAX + 1, GFP_KERNEL);
+ if (!kname)
+ return -ENOMEM;
+
+ ret = strncpy_from_user(kname, name, BCH_NAME_MAX);
+ if (unlikely(ret < 0))
+ goto err1;
+
+ qstr.len = ret;
+ qstr.name = kname;
+
+ ret = bch2_dirent_lookup(c, inode_inum(src), &hash, &qstr, &inum);
+ if (ret)
+ goto err1;
+
+ vinode = bch2_vfs_inode_get(c, inum);
+ ret = PTR_ERR_OR_ZERO(vinode);
+ if (ret)
+ goto err1;
+
+ dst = to_bch_ei(vinode);
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto err2;
+
+ bch2_lock_inodes(INODE_UPDATE_LOCK, src, dst);
+
+ if (inode_attr_changing(src, dst, Inode_opt_project)) {
+ ret = bch2_fs_quota_transfer(c, dst,
+ src->ei_qid,
+ 1 << QTYP_PRJ,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (ret)
+ goto err3;
+ }
+
+ ret = bch2_write_inode(c, dst, bch2_reinherit_attrs_fn, src, 0);
+err3:
+ bch2_unlock_inodes(INODE_UPDATE_LOCK, src, dst);
+
+ /* return true if we did work */
+ if (ret >= 0)
+ ret = !ret;
+
+ mnt_drop_write_file(file);
+err2:
+ iput(vinode);
+err1:
+ kfree(kname);
+
+ return ret;
+}
+
+static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg)
+{
+ u32 flags;
+ int ret = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (get_user(flags, arg))
+ return -EFAULT;
+
+ bch_notice(c, "shutdown by ioctl type %u", flags);
+
+ down_write(&c->vfs_sb->s_umount);
+
+ switch (flags) {
+ case FSOP_GOING_FLAGS_DEFAULT:
+ ret = freeze_bdev(c->vfs_sb->s_bdev);
+ if (ret)
+ goto err;
+
+ bch2_journal_flush(&c->journal);
+ c->vfs_sb->s_flags |= SB_RDONLY;
+ bch2_fs_emergency_read_only(c);
+ thaw_bdev(c->vfs_sb->s_bdev);
+ break;
+
+ case FSOP_GOING_FLAGS_LOGFLUSH:
+ bch2_journal_flush(&c->journal);
+ fallthrough;
+
+ case FSOP_GOING_FLAGS_NOLOGFLUSH:
+ c->vfs_sb->s_flags |= SB_RDONLY;
+ bch2_fs_emergency_read_only(c);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+err:
+ up_write(&c->vfs_sb->s_umount);
+ return ret;
+}
+
+static long __bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
+ struct bch_ioctl_subvolume arg)
+{
+ struct inode *dir;
+ struct bch_inode_info *inode;
+ struct user_namespace *s_user_ns;
+ struct dentry *dst_dentry;
+ struct path src_path, dst_path;
+ int how = LOOKUP_FOLLOW;
+ int error;
+ subvol_inum snapshot_src = { 0 };
+ unsigned lookup_flags = 0;
+ unsigned create_flags = BCH_CREATE_SUBVOL;
+
+ if (arg.flags & ~(BCH_SUBVOL_SNAPSHOT_CREATE|
+ BCH_SUBVOL_SNAPSHOT_RO))
+ return -EINVAL;
+
+ if (!(arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) &&
+ (arg.src_ptr ||
+ (arg.flags & BCH_SUBVOL_SNAPSHOT_RO)))
+ return -EINVAL;
+
+ if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE)
+ create_flags |= BCH_CREATE_SNAPSHOT;
+
+ if (arg.flags & BCH_SUBVOL_SNAPSHOT_RO)
+ create_flags |= BCH_CREATE_SNAPSHOT_RO;
+
+ /* why do we need this lock? */
+ down_read(&c->vfs_sb->s_umount);
+
+ if (arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE)
+ sync_inodes_sb(c->vfs_sb);
+retry:
+ if (arg.src_ptr) {
+ error = user_path_at(arg.dirfd,
+ (const char __user *)(unsigned long)arg.src_ptr,
+ how, &src_path);
+ if (error)
+ goto err1;
+
+ if (src_path.dentry->d_sb->s_fs_info != c) {
+ path_put(&src_path);
+ error = -EXDEV;
+ goto err1;
+ }
+
+ snapshot_src = inode_inum(to_bch_ei(src_path.dentry->d_inode));
+ }
+
+ dst_dentry = user_path_create(arg.dirfd,
+ (const char __user *)(unsigned long)arg.dst_ptr,
+ &dst_path, lookup_flags);
+ error = PTR_ERR_OR_ZERO(dst_dentry);
+ if (error)
+ goto err2;
+
+ if (dst_dentry->d_sb->s_fs_info != c) {
+ error = -EXDEV;
+ goto err3;
+ }
+
+ if (dst_dentry->d_inode) {
+ error = -EEXIST;
+ goto err3;
+ }
+
+ dir = dst_path.dentry->d_inode;
+ if (IS_DEADDIR(dir)) {
+ error = -BCH_ERR_ENOENT_directory_dead;
+ goto err3;
+ }
+
+ s_user_ns = dir->i_sb->s_user_ns;
+ if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
+ !kgid_has_mapping(s_user_ns, current_fsgid())) {
+ error = -EOVERFLOW;
+ goto err3;
+ }
+
+ error = inode_permission(file_mnt_idmap(filp),
+ dir, MAY_WRITE | MAY_EXEC);
+ if (error)
+ goto err3;
+
+ if (!IS_POSIXACL(dir))
+ arg.mode &= ~current_umask();
+
+ error = security_path_mkdir(&dst_path, dst_dentry, arg.mode);
+ if (error)
+ goto err3;
+
+ if ((arg.flags & BCH_SUBVOL_SNAPSHOT_CREATE) &&
+ !arg.src_ptr)
+ snapshot_src.subvol = to_bch_ei(dir)->ei_inode.bi_subvol;
+
+ inode = __bch2_create(file_mnt_idmap(filp), to_bch_ei(dir),
+ dst_dentry, arg.mode|S_IFDIR,
+ 0, snapshot_src, create_flags);
+ error = PTR_ERR_OR_ZERO(inode);
+ if (error)
+ goto err3;
+
+ d_instantiate(dst_dentry, &inode->v);
+ fsnotify_mkdir(dir, dst_dentry);
+err3:
+ done_path_create(&dst_path, dst_dentry);
+err2:
+ if (arg.src_ptr)
+ path_put(&src_path);
+
+ if (retry_estale(error, lookup_flags)) {
+ lookup_flags |= LOOKUP_REVAL;
+ goto retry;
+ }
+err1:
+ up_read(&c->vfs_sb->s_umount);
+
+ return error;
+}
+
+static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
+ struct bch_ioctl_subvolume arg)
+{
+ down_write(&c->snapshot_create_lock);
+ long ret = __bch2_ioctl_subvolume_create(c, filp, arg);
+ up_write(&c->snapshot_create_lock);
+
+ return ret;
+}
+
+static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
+ struct bch_ioctl_subvolume arg)
+{
+ struct path path;
+ struct inode *dir;
+ int ret = 0;
+
+ if (arg.flags)
+ return -EINVAL;
+
+ ret = user_path_at(arg.dirfd,
+ (const char __user *)(unsigned long)arg.dst_ptr,
+ LOOKUP_FOLLOW, &path);
+ if (ret)
+ return ret;
+
+ if (path.dentry->d_sb->s_fs_info != c) {
+ ret = -EXDEV;
+ goto err;
+ }
+
+ dir = path.dentry->d_parent->d_inode;
+
+ ret = __bch2_unlink(dir, path.dentry, true);
+ if (ret)
+ goto err;
+
+ fsnotify_rmdir(dir, path.dentry);
+ d_delete(path.dentry);
+err:
+ path_put(&path);
+ return ret;
+}
+
+long bch2_fs_file_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ long ret;
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ ret = bch2_ioc_getflags(inode, (int __user *) arg);
+ break;
+
+ case FS_IOC_SETFLAGS:
+ ret = bch2_ioc_setflags(c, file, inode, (int __user *) arg);
+ break;
+
+ case FS_IOC_FSGETXATTR:
+ ret = bch2_ioc_fsgetxattr(inode, (void __user *) arg);
+ break;
+
+ case FS_IOC_FSSETXATTR:
+ ret = bch2_ioc_fssetxattr(c, file, inode,
+ (void __user *) arg);
+ break;
+
+ case BCHFS_IOC_REINHERIT_ATTRS:
+ ret = bch2_ioc_reinherit_attrs(c, file, inode,
+ (void __user *) arg);
+ break;
+
+ case FS_IOC_GETVERSION:
+ ret = -ENOTTY;
+ break;
+
+ case FS_IOC_SETVERSION:
+ ret = -ENOTTY;
+ break;
+
+ case FS_IOC_GOINGDOWN:
+ ret = bch2_ioc_goingdown(c, (u32 __user *) arg);
+ break;
+
+ case BCH_IOCTL_SUBVOLUME_CREATE: {
+ struct bch_ioctl_subvolume i;
+
+ ret = copy_from_user(&i, (void __user *) arg, sizeof(i))
+ ? -EFAULT
+ : bch2_ioctl_subvolume_create(c, file, i);
+ break;
+ }
+
+ case BCH_IOCTL_SUBVOLUME_DESTROY: {
+ struct bch_ioctl_subvolume i;
+
+ ret = copy_from_user(&i, (void __user *) arg, sizeof(i))
+ ? -EFAULT
+ : bch2_ioctl_subvolume_destroy(c, file, i);
+ break;
+ }
+
+ default:
+ ret = bch2_fs_ioctl(c, cmd, (void __user *) arg);
+ break;
+ }
+
+ return bch2_err_class(ret);
+}
+
+#ifdef CONFIG_COMPAT
+long bch2_compat_fs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ /* These are just misnamed, they actually get/put from/to user an int */
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return bch2_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs-ioctl.h b/fs/bcachefs/fs-ioctl.h
new file mode 100644
index 000000000000..54a9c21a3b83
--- /dev/null
+++ b/fs/bcachefs/fs-ioctl.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_IOCTL_H
+#define _BCACHEFS_FS_IOCTL_H
+
+/* Inode flags: */
+
+/* bcachefs inode flags -> vfs inode flags: */
+static const __maybe_unused unsigned bch_flags_to_vfs[] = {
+ [__BCH_INODE_SYNC] = S_SYNC,
+ [__BCH_INODE_IMMUTABLE] = S_IMMUTABLE,
+ [__BCH_INODE_APPEND] = S_APPEND,
+ [__BCH_INODE_NOATIME] = S_NOATIME,
+};
+
+/* bcachefs inode flags -> FS_IOC_GETFLAGS: */
+static const __maybe_unused unsigned bch_flags_to_uflags[] = {
+ [__BCH_INODE_SYNC] = FS_SYNC_FL,
+ [__BCH_INODE_IMMUTABLE] = FS_IMMUTABLE_FL,
+ [__BCH_INODE_APPEND] = FS_APPEND_FL,
+ [__BCH_INODE_NODUMP] = FS_NODUMP_FL,
+ [__BCH_INODE_NOATIME] = FS_NOATIME_FL,
+};
+
+/* bcachefs inode flags -> FS_IOC_FSGETXATTR: */
+static const __maybe_unused unsigned bch_flags_to_xflags[] = {
+ [__BCH_INODE_SYNC] = FS_XFLAG_SYNC,
+ [__BCH_INODE_IMMUTABLE] = FS_XFLAG_IMMUTABLE,
+ [__BCH_INODE_APPEND] = FS_XFLAG_APPEND,
+ [__BCH_INODE_NODUMP] = FS_XFLAG_NODUMP,
+ [__BCH_INODE_NOATIME] = FS_XFLAG_NOATIME,
+ //[__BCH_INODE_PROJINHERIT] = FS_XFLAG_PROJINHERIT;
+};
+
+#define set_flags(_map, _in, _out) \
+do { \
+ unsigned _i; \
+ \
+ for (_i = 0; _i < ARRAY_SIZE(_map); _i++) \
+ if ((_in) & (1 << _i)) \
+ (_out) |= _map[_i]; \
+ else \
+ (_out) &= ~_map[_i]; \
+} while (0)
+
+#define map_flags(_map, _in) \
+({ \
+ unsigned _out = 0; \
+ \
+ set_flags(_map, _in, _out); \
+ _out; \
+})
+
+#define map_flags_rev(_map, _in) \
+({ \
+ unsigned _i, _out = 0; \
+ \
+ for (_i = 0; _i < ARRAY_SIZE(_map); _i++) \
+ if ((_in) & _map[_i]) { \
+ (_out) |= 1 << _i; \
+ (_in) &= ~_map[_i]; \
+ } \
+ (_out); \
+})
+
+#define map_defined(_map) \
+({ \
+ unsigned _in = ~0; \
+ \
+ map_flags_rev(_map, _in); \
+})
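+
+/*
+ * map_flags() translates bcachefs inode flags to user-visible bits using one
+ * of the tables above; map_flags_rev() goes the other way, clearing each
+ * recognized bit from _in so callers can reject unsupported flags by checking
+ * what remains. Illustrative:
+ *
+ *	unsigned in = FS_SYNC_FL|FS_NODUMP_FL;
+ *	unsigned out = map_flags_rev(bch_flags_to_uflags, in);
+ *
+ * leaves in == 0 and out with the __BCH_INODE_SYNC and __BCH_INODE_NODUMP
+ * bits set.
+ */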
+
+/* Set VFS inode flags from bcachefs inode: */
+static inline void bch2_inode_flags_to_vfs(struct bch_inode_info *inode)
+{
+ set_flags(bch_flags_to_vfs, inode->ei_inode.bi_flags, inode->v.i_flags);
+}
+
+long bch2_fs_file_ioctl(struct file *, unsigned, unsigned long);
+long bch2_compat_fs_ioctl(struct file *, unsigned, unsigned long);
+
+#endif /* _BCACHEFS_FS_IOCTL_H */
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
new file mode 100644
index 000000000000..6642b88c41a0
--- /dev/null
+++ b/fs/bcachefs/fs.c
@@ -0,0 +1,1980 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef NO_BCACHEFS_FS
+
+#include "bcachefs.h"
+#include "acl.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "chardev.h"
+#include "dirent.h"
+#include "errcode.h"
+#include "extents.h"
+#include "fs.h"
+#include "fs-common.h"
+#include "fs-io.h"
+#include "fs-ioctl.h"
+#include "fs-io-buffered.h"
+#include "fs-io-direct.h"
+#include "fs-io-pagecache.h"
+#include "fsck.h"
+#include "inode.h"
+#include "io_read.h"
+#include "journal.h"
+#include "keylist.h"
+#include "quota.h"
+#include "snapshot.h"
+#include "super.h"
+#include "xattr.h"
+
+#include <linux/aio.h>
+#include <linux/backing-dev.h>
+#include <linux/exportfs.h>
+#include <linux/fiemap.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/posix_acl.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/string.h>
+#include <linux/xattr.h>
+
+static struct kmem_cache *bch2_inode_cache;
+
+static void bch2_vfs_inode_init(struct btree_trans *, subvol_inum,
+ struct bch_inode_info *,
+ struct bch_inode_unpacked *,
+ struct bch_subvolume *);
+
+void bch2_inode_update_after_write(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ unsigned fields)
+{
+ struct bch_fs *c = trans->c;
+
+ BUG_ON(bi->bi_inum != inode->v.i_ino);
+
+ bch2_assert_pos_locked(trans, BTREE_ID_inodes,
+ POS(0, bi->bi_inum),
+ c->opts.inodes_use_key_cache);
+
+ set_nlink(&inode->v, bch2_inode_nlink_get(bi));
+ i_uid_write(&inode->v, bi->bi_uid);
+ i_gid_write(&inode->v, bi->bi_gid);
+ inode->v.i_mode = bi->bi_mode;
+
+ if (fields & ATTR_ATIME)
+ inode_set_atime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_atime));
+ if (fields & ATTR_MTIME)
+ inode_set_mtime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_mtime));
+ if (fields & ATTR_CTIME)
+ inode_set_ctime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_ctime));
+
+ inode->ei_inode = *bi;
+
+ bch2_inode_flags_to_vfs(inode);
+}
+
+int __must_check bch2_write_inode(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ inode_set_fn set,
+ void *p, unsigned fields)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bch_inode_unpacked inode_u;
+ int ret;
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
+ BTREE_ITER_INTENT) ?:
+ (set ? set(trans, inode, &inode_u, p) : 0) ?:
+ bch2_inode_write(trans, &iter, &inode_u) ?:
+ bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+
+ /*
+ * the btree node lock protects inode->ei_inode, not ei_update_lock;
+ * this is important for inode updates via bchfs_write_index_update
+ */
+ if (!ret)
+ bch2_inode_update_after_write(trans, inode, &inode_u, fields);
+
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_fs_fatal_err_on(bch2_err_matches(ret, ENOENT), c,
+ "inode %u:%llu not found when updating",
+ inode_inum(inode).subvol,
+ inode_inum(inode).inum);
+
+ bch2_trans_put(trans);
+ return ret < 0 ? ret : 0;
+}
+
+int bch2_fs_quota_transfer(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct bch_qid new_qid,
+ unsigned qtypes,
+ enum quota_acct_mode mode)
+{
+ unsigned i;
+ int ret;
+
+ qtypes &= enabled_qtypes(c);
+
+ for (i = 0; i < QTYP_NR; i++)
+ if (new_qid.q[i] == inode->ei_qid.q[i])
+ qtypes &= ~(1U << i);
+
+ if (!qtypes)
+ return 0;
+
+ mutex_lock(&inode->ei_quota_lock);
+
+ ret = bch2_quota_transfer(c, qtypes, new_qid,
+ inode->ei_qid,
+ inode->v.i_blocks +
+ inode->ei_quota_reserved,
+ mode);
+ if (!ret)
+ for (i = 0; i < QTYP_NR; i++)
+ if (qtypes & (1 << i))
+ inode->ei_qid.q[i] = new_qid.q[i];
+
+ mutex_unlock(&inode->ei_quota_lock);
+
+ return ret;
+}
+
+static int bch2_iget5_test(struct inode *vinode, void *p)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ subvol_inum *inum = p;
+
+ return inode->ei_subvol == inum->subvol &&
+ inode->ei_inode.bi_inum == inum->inum;
+}
+
+static int bch2_iget5_set(struct inode *vinode, void *p)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ subvol_inum *inum = p;
+
+ inode->v.i_ino = inum->inum;
+ inode->ei_subvol = inum->subvol;
+ inode->ei_inode.bi_inum = inum->inum;
+ return 0;
+}
+
+static unsigned bch2_inode_hash(subvol_inum inum)
+{
+ return jhash_3words(inum.subvol, inum.inum >> 32, inum.inum, JHASH_INITVAL);
+}
+
+struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum)
+{
+ struct bch_inode_unpacked inode_u;
+ struct bch_inode_info *inode;
+ struct btree_trans *trans;
+ struct bch_subvolume subvol;
+ int ret;
+
+ inode = to_bch_ei(iget5_locked(c->vfs_sb,
+ bch2_inode_hash(inum),
+ bch2_iget5_test,
+ bch2_iget5_set,
+ &inum));
+ if (unlikely(!inode))
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->v.i_state & I_NEW))
+ return &inode->v;
+
+ trans = bch2_trans_get(c);
+ ret = lockrestart_do(trans,
+ bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
+ bch2_inode_find_by_inum_trans(trans, inum, &inode_u));
+
+ if (!ret)
+ bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
+ bch2_trans_put(trans);
+
+ if (ret) {
+ iget_failed(&inode->v);
+ return ERR_PTR(bch2_err_class(ret));
+ }
+
+ mutex_lock(&c->vfs_inodes_lock);
+ list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
+ mutex_unlock(&c->vfs_inodes_lock);
+
+ unlock_new_inode(&inode->v);
+
+ return &inode->v;
+}
+
+struct bch_inode_info *
+__bch2_create(struct mnt_idmap *idmap,
+ struct bch_inode_info *dir, struct dentry *dentry,
+ umode_t mode, dev_t rdev, subvol_inum snapshot_src,
+ unsigned flags)
+{
+ struct bch_fs *c = dir->v.i_sb->s_fs_info;
+ struct btree_trans *trans;
+ struct bch_inode_unpacked dir_u;
+ struct bch_inode_info *inode, *old;
+ struct bch_inode_unpacked inode_u;
+ struct posix_acl *default_acl = NULL, *acl = NULL;
+ subvol_inum inum;
+ struct bch_subvolume subvol;
+ u64 journal_seq = 0;
+ int ret;
+
+ /*
+ * preallocate acls + vfs inode before btree transaction, so that
+ * nothing can fail after the transaction succeeds:
+ */
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ ret = posix_acl_create(&dir->v, &mode, &default_acl, &acl);
+ if (ret)
+ return ERR_PTR(ret);
+#endif
+ inode = to_bch_ei(new_inode(c->vfs_sb));
+ if (unlikely(!inode)) {
+ inode = ERR_PTR(-ENOMEM);
+ goto err;
+ }
+
+ bch2_inode_init_early(c, &inode_u);
+
+ if (!(flags & BCH_CREATE_TMPFILE))
+ mutex_lock(&dir->ei_update_lock);
+
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_create_trans(trans,
+ inode_inum(dir), &dir_u, &inode_u,
+ !(flags & BCH_CREATE_TMPFILE)
+ ? &dentry->d_name : NULL,
+ from_kuid(i_user_ns(&dir->v), current_fsuid()),
+ from_kgid(i_user_ns(&dir->v), current_fsgid()),
+ mode, rdev,
+ default_acl, acl, snapshot_src, flags) ?:
+ bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (unlikely(ret))
+ goto err_before_quota;
+
+ inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol;
+ inum.inum = inode_u.bi_inum;
+
+ ret = bch2_subvolume_get(trans, inum.subvol, true,
+ BTREE_ITER_WITH_UPDATES, &subvol) ?:
+ bch2_trans_commit(trans, NULL, &journal_seq, 0);
+ if (unlikely(ret)) {
+ bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
+ KEY_TYPE_QUOTA_WARN);
+err_before_quota:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ goto err_trans;
+ }
+
+ if (!(flags & BCH_CREATE_TMPFILE)) {
+ bch2_inode_update_after_write(trans, dir, &dir_u,
+ ATTR_MTIME|ATTR_CTIME);
+ mutex_unlock(&dir->ei_update_lock);
+ }
+
+ bch2_iget5_set(&inode->v, &inum);
+ bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
+
+ set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
+ set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl);
+
+ /*
+ * we must insert the new inode into the inode cache before calling
+ * bch2_trans_put() and dropping locks, else we could race with another
+ * thread pulling the inode in and modifying it:
+ */
+
+ inode->v.i_state |= I_CREATING;
+
+ old = to_bch_ei(inode_insert5(&inode->v,
+ bch2_inode_hash(inum),
+ bch2_iget5_test,
+ bch2_iget5_set,
+ &inum));
+ BUG_ON(!old);
+
+ if (unlikely(old != inode)) {
+ /*
+ * We raced, another process pulled the new inode into cache
+ * before us:
+ */
+ make_bad_inode(&inode->v);
+ iput(&inode->v);
+
+ inode = old;
+ } else {
+ mutex_lock(&c->vfs_inodes_lock);
+ list_add(&inode->ei_vfs_inode_list, &c->vfs_inodes_list);
+ mutex_unlock(&c->vfs_inodes_lock);
+ /*
+ * we really don't want insert_inode_locked2() to be setting
+ * I_NEW...
+ */
+ unlock_new_inode(&inode->v);
+ }
+
+ bch2_trans_put(trans);
+err:
+ posix_acl_release(default_acl);
+ posix_acl_release(acl);
+ return inode;
+err_trans:
+ if (!(flags & BCH_CREATE_TMPFILE))
+ mutex_unlock(&dir->ei_update_lock);
+
+ bch2_trans_put(trans);
+ make_bad_inode(&inode->v);
+ iput(&inode->v);
+ inode = ERR_PTR(ret);
+ goto err;
+}
+
+/* methods */
+
+static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct bch_fs *c = vdir->i_sb->s_fs_info;
+ struct bch_inode_info *dir = to_bch_ei(vdir);
+ struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
+ struct inode *vinode = NULL;
+ subvol_inum inum = { .subvol = 1 };
+ int ret;
+
+ ret = bch2_dirent_lookup(c, inode_inum(dir), &hash,
+ &dentry->d_name, &inum);
+
+ if (!ret)
+ vinode = bch2_vfs_inode_get(c, inum);
+
+ return d_splice_alias(vinode, dentry);
+}
+
+static int bch2_mknod(struct mnt_idmap *idmap,
+ struct inode *vdir, struct dentry *dentry,
+ umode_t mode, dev_t rdev)
+{
+ struct bch_inode_info *inode =
+ __bch2_create(idmap, to_bch_ei(vdir), dentry, mode, rdev,
+ (subvol_inum) { 0 }, 0);
+
+ if (IS_ERR(inode))
+ return bch2_err_class(PTR_ERR(inode));
+
+ d_instantiate(dentry, &inode->v);
+ return 0;
+}
+
+static int bch2_create(struct mnt_idmap *idmap,
+ struct inode *vdir, struct dentry *dentry,
+ umode_t mode, bool excl)
+{
+ return bch2_mknod(idmap, vdir, dentry, mode|S_IFREG, 0);
+}
+
+static int __bch2_link(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ struct bch_inode_info *dir,
+ struct dentry *dentry)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bch_inode_unpacked dir_u, inode_u;
+ int ret;
+
+ mutex_lock(&inode->ei_update_lock);
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_link_trans(trans,
+ inode_inum(dir), &dir_u,
+ inode_inum(inode), &inode_u,
+ &dentry->d_name));
+
+ if (likely(!ret)) {
+ bch2_inode_update_after_write(trans, dir, &dir_u,
+ ATTR_MTIME|ATTR_CTIME);
+ bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME);
+ }
+
+ bch2_trans_put(trans);
+ mutex_unlock(&inode->ei_update_lock);
+ return ret;
+}
+
+static int bch2_link(struct dentry *old_dentry, struct inode *vdir,
+ struct dentry *dentry)
+{
+ struct bch_fs *c = vdir->i_sb->s_fs_info;
+ struct bch_inode_info *dir = to_bch_ei(vdir);
+ struct bch_inode_info *inode = to_bch_ei(old_dentry->d_inode);
+ int ret;
+
+ lockdep_assert_held(&inode->v.i_rwsem);
+
+ ret = __bch2_link(c, inode, dir, dentry);
+ if (unlikely(ret))
+ return ret;
+
+ ihold(&inode->v);
+ d_instantiate(dentry, &inode->v);
+ return 0;
+}
+
+int __bch2_unlink(struct inode *vdir, struct dentry *dentry,
+ bool deleting_snapshot)
+{
+ struct bch_fs *c = vdir->i_sb->s_fs_info;
+ struct bch_inode_info *dir = to_bch_ei(vdir);
+ struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct bch_inode_unpacked dir_u, inode_u;
+ struct btree_trans *trans = bch2_trans_get(c);
+ int ret;
+
+ bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode);
+
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL,
+ bch2_unlink_trans(trans,
+ inode_inum(dir), &dir_u,
+ &inode_u, &dentry->d_name,
+ deleting_snapshot));
+ if (unlikely(ret))
+ goto err;
+
+ bch2_inode_update_after_write(trans, dir, &dir_u,
+ ATTR_MTIME|ATTR_CTIME);
+ bch2_inode_update_after_write(trans, inode, &inode_u,
+ ATTR_MTIME);
+
+ if (inode_u.bi_subvol) {
+ /*
+ * Subvolume deletion is asynchronous, but we still want to tell
+ * the VFS that it's been deleted here:
+ */
+ set_nlink(&inode->v, 0);
+ }
+err:
+ bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode);
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
+{
+ return __bch2_unlink(vdir, dentry, false);
+}
+
+static int bch2_symlink(struct mnt_idmap *idmap,
+ struct inode *vdir, struct dentry *dentry,
+ const char *symname)
+{
+ struct bch_fs *c = vdir->i_sb->s_fs_info;
+ struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
+ int ret;
+
+ inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
+ (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
+ if (IS_ERR(inode))
+ return bch2_err_class(PTR_ERR(inode));
+
+ inode_lock(&inode->v);
+ ret = page_symlink(&inode->v, symname, strlen(symname) + 1);
+ inode_unlock(&inode->v);
+
+ if (unlikely(ret))
+ goto err;
+
+ ret = filemap_write_and_wait_range(inode->v.i_mapping, 0, LLONG_MAX);
+ if (unlikely(ret))
+ goto err;
+
+ ret = __bch2_link(c, inode, dir, dentry);
+ if (unlikely(ret))
+ goto err;
+
+ d_instantiate(dentry, &inode->v);
+ return 0;
+err:
+ iput(&inode->v);
+ return ret;
+}
+
+static int bch2_mkdir(struct mnt_idmap *idmap,
+ struct inode *vdir, struct dentry *dentry, umode_t mode)
+{
+ return bch2_mknod(idmap, vdir, dentry, mode|S_IFDIR, 0);
+}
+
+static int bch2_rename2(struct mnt_idmap *idmap,
+ struct inode *src_vdir, struct dentry *src_dentry,
+ struct inode *dst_vdir, struct dentry *dst_dentry,
+ unsigned flags)
+{
+ struct bch_fs *c = src_vdir->i_sb->s_fs_info;
+ struct bch_inode_info *src_dir = to_bch_ei(src_vdir);
+ struct bch_inode_info *dst_dir = to_bch_ei(dst_vdir);
+ struct bch_inode_info *src_inode = to_bch_ei(src_dentry->d_inode);
+ struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode);
+ struct bch_inode_unpacked dst_dir_u, src_dir_u;
+ struct bch_inode_unpacked src_inode_u, dst_inode_u;
+ struct btree_trans *trans;
+ enum bch_rename_mode mode = flags & RENAME_EXCHANGE
+ ? BCH_RENAME_EXCHANGE
+ : dst_dentry->d_inode
+ ? BCH_RENAME_OVERWRITE : BCH_RENAME;
+ int ret;
+
+ if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
+ return -EINVAL;
+
+ if (mode == BCH_RENAME_OVERWRITE) {
+ ret = filemap_write_and_wait_range(src_inode->v.i_mapping,
+ 0, LLONG_MAX);
+ if (ret)
+ return ret;
+ }
+
+ trans = bch2_trans_get(c);
+
+ bch2_lock_inodes(INODE_UPDATE_LOCK,
+ src_dir,
+ dst_dir,
+ src_inode,
+ dst_inode);
+
+ if (inode_attr_changing(dst_dir, src_inode, Inode_opt_project)) {
+ ret = bch2_fs_quota_transfer(c, src_inode,
+ dst_dir->ei_qid,
+ 1 << QTYP_PRJ,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (ret)
+ goto err;
+ }
+
+ if (mode == BCH_RENAME_EXCHANGE &&
+ inode_attr_changing(src_dir, dst_inode, Inode_opt_project)) {
+ ret = bch2_fs_quota_transfer(c, dst_inode,
+ src_dir->ei_qid,
+ 1 << QTYP_PRJ,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (ret)
+ goto err;
+ }
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_rename_trans(trans,
+ inode_inum(src_dir), &src_dir_u,
+ inode_inum(dst_dir), &dst_dir_u,
+ &src_inode_u,
+ &dst_inode_u,
+ &src_dentry->d_name,
+ &dst_dentry->d_name,
+ mode));
+ if (unlikely(ret))
+ goto err;
+
+ BUG_ON(src_inode->v.i_ino != src_inode_u.bi_inum);
+ BUG_ON(dst_inode &&
+ dst_inode->v.i_ino != dst_inode_u.bi_inum);
+
+ bch2_inode_update_after_write(trans, src_dir, &src_dir_u,
+ ATTR_MTIME|ATTR_CTIME);
+
+ if (src_dir != dst_dir)
+ bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u,
+ ATTR_MTIME|ATTR_CTIME);
+
+ bch2_inode_update_after_write(trans, src_inode, &src_inode_u,
+ ATTR_CTIME);
+
+ if (dst_inode)
+ bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u,
+ ATTR_CTIME);
+err:
+ bch2_trans_put(trans);
+
+ bch2_fs_quota_transfer(c, src_inode,
+ bch_qid(&src_inode->ei_inode),
+ 1 << QTYP_PRJ,
+ KEY_TYPE_QUOTA_NOCHECK);
+ if (dst_inode)
+ bch2_fs_quota_transfer(c, dst_inode,
+ bch_qid(&dst_inode->ei_inode),
+ 1 << QTYP_PRJ,
+ KEY_TYPE_QUOTA_NOCHECK);
+
+ bch2_unlock_inodes(INODE_UPDATE_LOCK,
+ src_dir,
+ dst_dir,
+ src_inode,
+ dst_inode);
+
+ return ret;
+}
+
+static void bch2_setattr_copy(struct mnt_idmap *idmap,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ struct iattr *attr)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ unsigned int ia_valid = attr->ia_valid;
+
+ if (ia_valid & ATTR_UID)
+ bi->bi_uid = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
+ if (ia_valid & ATTR_GID)
+ bi->bi_gid = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
+
+ if (ia_valid & ATTR_SIZE)
+ bi->bi_size = attr->ia_size;
+
+ if (ia_valid & ATTR_ATIME)
+ bi->bi_atime = timespec_to_bch2_time(c, attr->ia_atime);
+ if (ia_valid & ATTR_MTIME)
+ bi->bi_mtime = timespec_to_bch2_time(c, attr->ia_mtime);
+ if (ia_valid & ATTR_CTIME)
+ bi->bi_ctime = timespec_to_bch2_time(c, attr->ia_ctime);
+
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+ kgid_t gid = ia_valid & ATTR_GID
+ ? attr->ia_gid
+ : inode->v.i_gid;
+
+ if (!in_group_p(gid) &&
+ !capable_wrt_inode_uidgid(idmap, &inode->v, CAP_FSETID))
+ mode &= ~S_ISGID;
+ bi->bi_mode = mode;
+ }
+}
+
+int bch2_setattr_nonsize(struct mnt_idmap *idmap,
+ struct bch_inode_info *inode,
+ struct iattr *attr)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_qid qid;
+ struct btree_trans *trans;
+ struct btree_iter inode_iter = { NULL };
+ struct bch_inode_unpacked inode_u;
+ struct posix_acl *acl = NULL;
+ int ret;
+
+ mutex_lock(&inode->ei_update_lock);
+
+ qid = inode->ei_qid;
+
+ if (attr->ia_valid & ATTR_UID)
+ qid.q[QTYP_USR] = from_kuid(i_user_ns(&inode->v), attr->ia_uid);
+
+ if (attr->ia_valid & ATTR_GID)
+ qid.q[QTYP_GRP] = from_kgid(i_user_ns(&inode->v), attr->ia_gid);
+
+ ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
+ KEY_TYPE_QUOTA_PREALLOC);
+ if (ret)
+ goto err;
+
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+ kfree(acl);
+ acl = NULL;
+
+ ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
+ BTREE_ITER_INTENT);
+ if (ret)
+ goto btree_err;
+
+ bch2_setattr_copy(idmap, inode, &inode_u, attr);
+
+ if (attr->ia_valid & ATTR_MODE) {
+ ret = bch2_acl_chmod(trans, inode_inum(inode), &inode_u,
+ inode_u.bi_mode, &acl);
+ if (ret)
+ goto btree_err;
+ }
+
+ ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+btree_err:
+ bch2_trans_iter_exit(trans, &inode_iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+ if (unlikely(ret))
+ goto err_trans;
+
+ bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid);
+
+ if (acl)
+ set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl);
+err_trans:
+ bch2_trans_put(trans);
+err:
+ mutex_unlock(&inode->ei_update_lock);
+
+ return bch2_err_class(ret);
+}
+
+static int bch2_getattr(struct mnt_idmap *idmap,
+ const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned query_flags)
+{
+ struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+ stat->dev = inode->v.i_sb->s_dev;
+ stat->ino = inode->v.i_ino;
+ stat->mode = inode->v.i_mode;
+ stat->nlink = inode->v.i_nlink;
+ stat->uid = inode->v.i_uid;
+ stat->gid = inode->v.i_gid;
+ stat->rdev = inode->v.i_rdev;
+ stat->size = i_size_read(&inode->v);
+ stat->atime = inode_get_atime(&inode->v);
+ stat->mtime = inode_get_mtime(&inode->v);
+ stat->ctime = inode_get_ctime(&inode->v);
+ stat->blksize = block_bytes(c);
+ stat->blocks = inode->v.i_blocks;
+
+ if (request_mask & STATX_BTIME) {
+ stat->result_mask |= STATX_BTIME;
+ stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
+ }
+
+ if (inode->ei_inode.bi_flags & BCH_INODE_IMMUTABLE)
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ stat->attributes_mask |= STATX_ATTR_IMMUTABLE;
+
+ if (inode->ei_inode.bi_flags & BCH_INODE_APPEND)
+ stat->attributes |= STATX_ATTR_APPEND;
+ stat->attributes_mask |= STATX_ATTR_APPEND;
+
+ if (inode->ei_inode.bi_flags & BCH_INODE_NODUMP)
+ stat->attributes |= STATX_ATTR_NODUMP;
+ stat->attributes_mask |= STATX_ATTR_NODUMP;
+
+ return 0;
+}
+
+static int bch2_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *iattr)
+{
+ struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ int ret;
+
+ lockdep_assert_held(&inode->v.i_rwsem);
+
+ ret = setattr_prepare(idmap, dentry, iattr);
+ if (ret)
+ return ret;
+
+ return iattr->ia_valid & ATTR_SIZE
+ ? bchfs_truncate(idmap, inode, iattr)
+ : bch2_setattr_nonsize(idmap, inode, iattr);
+}
+
+static int bch2_tmpfile(struct mnt_idmap *idmap,
+ struct inode *vdir, struct file *file, umode_t mode)
+{
+ struct bch_inode_info *inode =
+ __bch2_create(idmap, to_bch_ei(vdir),
+ file->f_path.dentry, mode, 0,
+ (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
+
+ if (IS_ERR(inode))
+ return bch2_err_class(PTR_ERR(inode));
+
+ d_mark_tmpfile(file, &inode->v);
+ d_instantiate(file->f_path.dentry, &inode->v);
+ return finish_open_simple(file, 0);
+}
+
+static int bch2_fill_extent(struct bch_fs *c,
+ struct fiemap_extent_info *info,
+ struct bkey_s_c k, unsigned flags)
+{
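+	/*
+	 * Note: extent keys are in units of 512-byte sectors; the << 9 shifts
+	 * below convert start, offset and length to the byte units fiemap
+	 * expects.
+	 */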
+ if (bkey_extent_is_direct_data(k.k)) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ int ret;
+
+ if (k.k->type == KEY_TYPE_reflink_v)
+ flags |= FIEMAP_EXTENT_SHARED;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ int flags2 = 0;
+ u64 offset = p.ptr.offset;
+
+ if (p.ptr.unwritten)
+ flags2 |= FIEMAP_EXTENT_UNWRITTEN;
+
+ if (p.crc.compression_type)
+ flags2 |= FIEMAP_EXTENT_ENCODED;
+ else
+ offset += p.crc.offset;
+
+ if ((offset & (block_sectors(c) - 1)) ||
+ (k.k->size & (block_sectors(c) - 1)))
+ flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
+
+ ret = fiemap_fill_next_extent(info,
+ bkey_start_offset(k.k) << 9,
+ offset << 9,
+ k.k->size << 9, flags|flags2);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ } else if (bkey_extent_is_inline_data(k.k)) {
+ return fiemap_fill_next_extent(info,
+ bkey_start_offset(k.k) << 9,
+ 0, k.k->size << 9,
+ flags|
+ FIEMAP_EXTENT_DATA_INLINE);
+ } else if (k.k->type == KEY_TYPE_reservation) {
+ return fiemap_fill_next_extent(info,
+ bkey_start_offset(k.k) << 9,
+ 0, k.k->size << 9,
+ flags|
+ FIEMAP_EXTENT_DELALLOC|
+ FIEMAP_EXTENT_UNWRITTEN);
+ } else {
+ BUG();
+ }
+}
+
+static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
+ u64 start, u64 len)
+{
+ struct bch_fs *c = vinode->i_sb->s_fs_info;
+ struct bch_inode_info *ei = to_bch_ei(vinode);
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf cur, prev;
+ struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
+ unsigned offset_into_extent, sectors;
+ bool have_extent = false;
+ u32 snapshot;
+ int ret = 0;
+
+ ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
+ if (ret)
+ return ret;
+
+ if (start + len < start)
+ return -EINVAL;
+
+ start >>= 9;
+
+ bch2_bkey_buf_init(&cur);
+ bch2_bkey_buf_init(&prev);
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, ei->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(ei->v.i_ino, start, snapshot), 0);
+
+ while (!(ret = btree_trans_too_many_iters(trans)) &&
+ (k = bch2_btree_iter_peek_upto(&iter, end)).k &&
+ !(ret = bkey_err(k))) {
+ enum btree_id data_btree = BTREE_ID_extents;
+
+ if (!bkey_extent_is_data(k.k) &&
+ k.k->type != KEY_TYPE_reservation) {
+ bch2_btree_iter_advance(&iter);
+ continue;
+ }
+
+ offset_into_extent = iter.pos.offset -
+ bkey_start_offset(k.k);
+ sectors = k.k->size - offset_into_extent;
+
+ bch2_bkey_buf_reassemble(&cur, c, k);
+
+ ret = bch2_read_indirect_extent(trans, &data_btree,
+ &offset_into_extent, &cur);
+ if (ret)
+ break;
+
+ k = bkey_i_to_s_c(cur.k);
+ bch2_bkey_buf_realloc(&prev, c, k.k->u64s);
+
+ sectors = min(sectors, k.k->size - offset_into_extent);
+
+ bch2_cut_front(POS(k.k->p.inode,
+ bkey_start_offset(k.k) +
+ offset_into_extent),
+ cur.k);
+ bch2_key_resize(&cur.k->k, sectors);
+ cur.k->k.p = iter.pos;
+ cur.k->k.p.offset += cur.k->k.size;
+
+ if (have_extent) {
+ bch2_trans_unlock(trans);
+ ret = bch2_fill_extent(c, info,
+ bkey_i_to_s_c(prev.k), 0);
+ if (ret)
+ break;
+ }
+
+ bkey_copy(prev.k, cur.k);
+ have_extent = true;
+
+ bch2_btree_iter_set_pos(&iter,
+ POS(iter.pos.inode, iter.pos.offset + sectors));
+ }
+ start = iter.pos.offset;
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ if (!ret && have_extent) {
+ bch2_trans_unlock(trans);
+ ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
+ FIEMAP_EXTENT_LAST);
+ }
+
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&cur, c);
+ bch2_bkey_buf_exit(&prev, c);
+ return ret < 0 ? ret : 0;
+}
+
+static const struct vm_operations_struct bch_vm_ops = {
+ .fault = bch2_page_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = bch2_page_mkwrite,
+};
+
+static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ file_accessed(file);
+
+ vma->vm_ops = &bch_vm_ops;
+ return 0;
+}
+
+/* Directories: */
+
+static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+ return generic_file_llseek_size(file, offset, whence,
+ S64_MAX, S64_MAX);
+}
+
+static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct bch_inode_info *inode = file_bch_inode(file);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ int ret;
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+ ret = bch2_readdir(c, inode_inum(inode), ctx);
+ if (ret)
+ bch_err_fn(c, ret);
+
+ return bch2_err_class(ret);
+}
+
+static const struct file_operations bch_file_operations = {
+ .llseek = bch2_llseek,
+ .read_iter = bch2_read_iter,
+ .write_iter = bch2_write_iter,
+ .mmap = bch2_mmap,
+ .open = generic_file_open,
+ .fsync = bch2_fsync,
+ .splice_read = filemap_splice_read,
+ .splice_write = iter_file_splice_write,
+ .fallocate = bch2_fallocate_dispatch,
+ .unlocked_ioctl = bch2_fs_file_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = bch2_compat_fs_ioctl,
+#endif
+ .remap_file_range = bch2_remap_file_range,
+};
+
+static const struct inode_operations bch_file_inode_operations = {
+ .getattr = bch2_getattr,
+ .setattr = bch2_setattr,
+ .fiemap = bch2_fiemap,
+ .listxattr = bch2_xattr_list,
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
+#endif
+};
+
+static const struct inode_operations bch_dir_inode_operations = {
+ .lookup = bch2_lookup,
+ .create = bch2_create,
+ .link = bch2_link,
+ .unlink = bch2_unlink,
+ .symlink = bch2_symlink,
+ .mkdir = bch2_mkdir,
+ .rmdir = bch2_unlink,
+ .mknod = bch2_mknod,
+ .rename = bch2_rename2,
+ .getattr = bch2_getattr,
+ .setattr = bch2_setattr,
+ .tmpfile = bch2_tmpfile,
+ .listxattr = bch2_xattr_list,
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
+#endif
+};
+
+static const struct file_operations bch_dir_file_operations = {
+ .llseek = bch2_dir_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = bch2_vfs_readdir,
+ .fsync = bch2_fsync,
+ .unlocked_ioctl = bch2_fs_file_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = bch2_compat_fs_ioctl,
+#endif
+};
+
+static const struct inode_operations bch_symlink_inode_operations = {
+ .get_link = page_get_link,
+ .getattr = bch2_getattr,
+ .setattr = bch2_setattr,
+ .listxattr = bch2_xattr_list,
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
+#endif
+};
+
+static const struct inode_operations bch_special_inode_operations = {
+ .getattr = bch2_getattr,
+ .setattr = bch2_setattr,
+ .listxattr = bch2_xattr_list,
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
+#endif
+};
+
+static const struct address_space_operations bch_address_space_operations = {
+ .read_folio = bch2_read_folio,
+ .writepages = bch2_writepages,
+ .readahead = bch2_readahead,
+ .dirty_folio = filemap_dirty_folio,
+ .write_begin = bch2_write_begin,
+ .write_end = bch2_write_end,
+ .invalidate_folio = bch2_invalidate_folio,
+ .release_folio = bch2_release_folio,
+ .direct_IO = noop_direct_IO,
+#ifdef CONFIG_MIGRATION
+ .migrate_folio = filemap_migrate_folio,
+#endif
+ .error_remove_page = generic_error_remove_page,
+};
+
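+/*
+ * NFS file handles: fh_len is counted in u32 words, so a bare bcachefs_fid
+ * (inum + subvol + gen, packed) encodes to 4 words and a
+ * bcachefs_fid_with_parent to 8:
+ */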
+struct bcachefs_fid {
+ u64 inum;
+ u32 subvol;
+ u32 gen;
+} __packed;
+
+struct bcachefs_fid_with_parent {
+ struct bcachefs_fid fid;
+ struct bcachefs_fid dir;
+} __packed;
+
+static int bcachefs_fid_valid(int fh_len, int fh_type)
+{
+ switch (fh_type) {
+ case FILEID_BCACHEFS_WITHOUT_PARENT:
+ return fh_len == sizeof(struct bcachefs_fid) / sizeof(u32);
+ case FILEID_BCACHEFS_WITH_PARENT:
+ return fh_len == sizeof(struct bcachefs_fid_with_parent) / sizeof(u32);
+ default:
+ return false;
+ }
+}
+
+static struct bcachefs_fid bch2_inode_to_fid(struct bch_inode_info *inode)
+{
+ return (struct bcachefs_fid) {
+ .inum = inode->ei_inode.bi_inum,
+ .subvol = inode->ei_subvol,
+ .gen = inode->ei_inode.bi_generation,
+ };
+}
+
+static int bch2_encode_fh(struct inode *vinode, u32 *fh, int *len,
+ struct inode *vdir)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ struct bch_inode_info *dir = to_bch_ei(vdir);
+
+ if (*len < sizeof(struct bcachefs_fid_with_parent) / sizeof(u32))
+ return FILEID_INVALID;
+
+ if (!S_ISDIR(inode->v.i_mode) && dir) {
+ struct bcachefs_fid_with_parent *fid = (void *) fh;
+
+ fid->fid = bch2_inode_to_fid(inode);
+ fid->dir = bch2_inode_to_fid(dir);
+
+ *len = sizeof(*fid) / sizeof(u32);
+ return FILEID_BCACHEFS_WITH_PARENT;
+ } else {
+ struct bcachefs_fid *fid = (void *) fh;
+
+ *fid = bch2_inode_to_fid(inode);
+
+ *len = sizeof(*fid) / sizeof(u32);
+ return FILEID_BCACHEFS_WITHOUT_PARENT;
+ }
+}
+
+static struct inode *bch2_nfs_get_inode(struct super_block *sb,
+ struct bcachefs_fid fid)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct inode *vinode = bch2_vfs_inode_get(c, (subvol_inum) {
+ .subvol = fid.subvol,
+ .inum = fid.inum,
+ });
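+	/*
+	 * bi_generation is bumped when an inode number is reused, so a file
+	 * handle carrying a stale generation must fail with -ESTALE rather
+	 * than resolving to the new inode:
+	 */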
+ if (!IS_ERR(vinode) && vinode->i_generation != fid.gen) {
+ iput(vinode);
+ vinode = ERR_PTR(-ESTALE);
+ }
+ return vinode;
+}
+
+static struct dentry *bch2_fh_to_dentry(struct super_block *sb, struct fid *_fid,
+ int fh_len, int fh_type)
+{
+ struct bcachefs_fid *fid = (void *) _fid;
+
+ if (!bcachefs_fid_valid(fh_len, fh_type))
+ return NULL;
+
+ return d_obtain_alias(bch2_nfs_get_inode(sb, *fid));
+}
+
+static struct dentry *bch2_fh_to_parent(struct super_block *sb, struct fid *_fid,
+ int fh_len, int fh_type)
+{
+ struct bcachefs_fid_with_parent *fid = (void *) _fid;
+
+ if (!bcachefs_fid_valid(fh_len, fh_type) ||
+ fh_type != FILEID_BCACHEFS_WITH_PARENT)
+ return NULL;
+
+ return d_obtain_alias(bch2_nfs_get_inode(sb, fid->dir));
+}
+
+static struct dentry *bch2_get_parent(struct dentry *child)
+{
+ struct bch_inode_info *inode = to_bch_ei(child->d_inode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ subvol_inum parent_inum = {
+ .subvol = inode->ei_inode.bi_parent_subvol ?:
+ inode->ei_subvol,
+ .inum = inode->ei_inode.bi_dir,
+ };
+
+ if (!parent_inum.inum)
+ return NULL;
+
+ return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
+}
+
+static int bch2_get_name(struct dentry *parent, char *name, struct dentry *child)
+{
+ struct bch_inode_info *inode = to_bch_ei(child->d_inode);
+ struct bch_inode_info *dir = to_bch_ei(parent->d_inode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct btree_trans *trans;
+ struct btree_iter iter1;
+ struct btree_iter iter2;
+ struct bkey_s_c k;
+ struct bkey_s_c_dirent d;
+ struct bch_inode_unpacked inode_u;
+ subvol_inum target;
+ u32 snapshot;
+ struct qstr dirent_name;
+ unsigned name_len = 0;
+ int ret;
+
+ if (!S_ISDIR(dir->v.i_mode))
+ return -EINVAL;
+
+ trans = bch2_trans_get(c);
+
+ bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents,
+ POS(dir->ei_inode.bi_inum, 0), 0);
+ bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents,
+ POS(dir->ei_inode.bi_inum, 0), 0);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, dir->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&iter1, snapshot);
+ bch2_btree_iter_set_snapshot(&iter2, snapshot);
+
+ ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
+ if (ret)
+ goto err;
+
+ if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
+ bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
+
+ k = bch2_btree_iter_peek_slot(&iter1);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != KEY_TYPE_dirent) {
+ ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
+ goto err;
+ }
+
+ d = bkey_s_c_to_dirent(k);
+ ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
+ if (ret > 0)
+ ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
+ if (ret)
+ goto err;
+
+ if (target.subvol == inode->ei_subvol &&
+ target.inum == inode->ei_inode.bi_inum)
+ goto found;
+ } else {
+ /*
+		 * The file has multiple hardlinks and our backref points to the
+		 * wrong directory - fall back to a linear search:
+ */
+ for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
+ if (k.k->p.inode > dir->ei_inode.bi_inum)
+ break;
+
+ if (k.k->type != KEY_TYPE_dirent)
+ continue;
+
+ d = bkey_s_c_to_dirent(k);
+ ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
+ if (ret < 0)
+ break;
+ if (ret)
+ continue;
+
+ if (target.subvol == inode->ei_subvol &&
+ target.inum == inode->ei_inode.bi_inum)
+ goto found;
+ }
+ }
+
+ ret = -ENOENT;
+ goto err;
+found:
+ dirent_name = bch2_dirent_get_name(d);
+
+ name_len = min_t(unsigned, dirent_name.len, NAME_MAX);
+ memcpy(name, dirent_name.name, name_len);
+ name[name_len] = '\0';
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_iter_exit(trans, &iter1);
+ bch2_trans_iter_exit(trans, &iter2);
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+static const struct export_operations bch_export_ops = {
+ .encode_fh = bch2_encode_fh,
+ .fh_to_dentry = bch2_fh_to_dentry,
+ .fh_to_parent = bch2_fh_to_parent,
+ .get_parent = bch2_get_parent,
+ .get_name = bch2_get_name,
+};
+
+static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ struct bch_subvolume *subvol)
+{
+ bch2_inode_update_after_write(trans, inode, bi, ~0);
+
+ if (BCH_SUBVOLUME_SNAP(subvol))
+ set_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
+ else
+ clear_bit(EI_INODE_SNAPSHOT, &inode->ei_flags);
+
+ inode->v.i_blocks = bi->bi_sectors;
+ inode->v.i_ino = bi->bi_inum;
+ inode->v.i_rdev = bi->bi_dev;
+ inode->v.i_generation = bi->bi_generation;
+ inode->v.i_size = bi->bi_size;
+
+ inode->ei_flags = 0;
+ inode->ei_quota_reserved = 0;
+ inode->ei_qid = bch_qid(bi);
+ inode->ei_subvol = inum.subvol;
+
+ inode->v.i_mapping->a_ops = &bch_address_space_operations;
+
+ switch (inode->v.i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->v.i_op = &bch_file_inode_operations;
+ inode->v.i_fop = &bch_file_operations;
+ break;
+ case S_IFDIR:
+ inode->v.i_op = &bch_dir_inode_operations;
+ inode->v.i_fop = &bch_dir_file_operations;
+ break;
+ case S_IFLNK:
+ inode_nohighmem(&inode->v);
+ inode->v.i_op = &bch_symlink_inode_operations;
+ break;
+ default:
+ init_special_inode(&inode->v, inode->v.i_mode, inode->v.i_rdev);
+ inode->v.i_op = &bch_special_inode_operations;
+ break;
+ }
+
+ mapping_set_large_folios(inode->v.i_mapping);
+}
+
+static struct inode *bch2_alloc_inode(struct super_block *sb)
+{
+ struct bch_inode_info *inode;
+
+ inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
+ if (!inode)
+ return NULL;
+
+ inode_init_once(&inode->v);
+ mutex_init(&inode->ei_update_lock);
+ two_state_lock_init(&inode->ei_pagecache_lock);
+ INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
+ mutex_init(&inode->ei_quota_lock);
+
+ return &inode->v;
+}
+
+static void bch2_i_callback(struct rcu_head *head)
+{
+ struct inode *vinode = container_of(head, struct inode, i_rcu);
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+
+ kmem_cache_free(bch2_inode_cache, inode);
+}
+
+static void bch2_destroy_inode(struct inode *vinode)
+{
+ call_rcu(&vinode->i_rcu, bch2_i_callback);
+}
+
+static int inode_update_times_fn(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
+{
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+
+ bi->bi_atime = timespec_to_bch2_time(c, inode_get_atime(&inode->v));
+ bi->bi_mtime = timespec_to_bch2_time(c, inode_get_mtime(&inode->v));
+ bi->bi_ctime = timespec_to_bch2_time(c, inode_get_ctime(&inode->v));
+
+ return 0;
+}
+
+static int bch2_vfs_write_inode(struct inode *vinode,
+ struct writeback_control *wbc)
+{
+ struct bch_fs *c = vinode->i_sb->s_fs_info;
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ int ret;
+
+ mutex_lock(&inode->ei_update_lock);
+ ret = bch2_write_inode(c, inode, inode_update_times_fn, NULL,
+ ATTR_ATIME|ATTR_MTIME|ATTR_CTIME);
+ mutex_unlock(&inode->ei_update_lock);
+
+ return bch2_err_class(ret);
+}
+
+static void bch2_evict_inode(struct inode *vinode)
+{
+ struct bch_fs *c = vinode->i_sb->s_fs_info;
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+
+ truncate_inode_pages_final(&inode->v.i_data);
+
+ clear_inode(&inode->v);
+
+ BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
+
+ if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
+ bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
+ KEY_TYPE_QUOTA_WARN);
+ bch2_quota_acct(c, inode->ei_qid, Q_INO, -1,
+ KEY_TYPE_QUOTA_WARN);
+ bch2_inode_rm(c, inode_inum(inode));
+ }
+
+ mutex_lock(&c->vfs_inodes_lock);
+ list_del_init(&inode->ei_vfs_inode_list);
+ mutex_unlock(&c->vfs_inodes_lock);
+}
+
+void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s)
+{
+ struct bch_inode_info *inode, **i;
+ DARRAY(struct bch_inode_info *) grabbed;
+ bool clean_pass = false, this_pass_clean;
+
+ /*
+ * Initially, we scan for inodes without I_DONTCACHE, then mark them to
+ * be pruned with d_mark_dontcache().
+ *
+ * Once we've had a clean pass where we didn't find any inodes without
+ * I_DONTCACHE, we wait for them to be freed:
+ */
+
+ darray_init(&grabbed);
+ darray_make_room(&grabbed, 1024);
+again:
+ cond_resched();
+ this_pass_clean = true;
+
+ mutex_lock(&c->vfs_inodes_lock);
+ list_for_each_entry(inode, &c->vfs_inodes_list, ei_vfs_inode_list) {
+ if (!snapshot_list_has_id(s, inode->ei_subvol))
+ continue;
+
+ if (!(inode->v.i_state & I_DONTCACHE) &&
+ !(inode->v.i_state & I_FREEING) &&
+ igrab(&inode->v)) {
+ this_pass_clean = false;
+
+ if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
+ iput(&inode->v);
+ break;
+ }
+ } else if (clean_pass && this_pass_clean) {
+ wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW);
+ DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);
+
+ prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
+ mutex_unlock(&c->vfs_inodes_lock);
+
+ schedule();
+ finish_wait(wq, &wait.wq_entry);
+ goto again;
+ }
+ }
+ mutex_unlock(&c->vfs_inodes_lock);
+
+ darray_for_each(grabbed, i) {
+ inode = *i;
+ d_mark_dontcache(&inode->v);
+ d_prune_aliases(&inode->v);
+ iput(&inode->v);
+ }
+ grabbed.nr = 0;
+
+ if (!clean_pass || !this_pass_clean) {
+ clean_pass = this_pass_clean;
+ goto again;
+ }
+
+ darray_exit(&grabbed);
+}
+
+static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct bch_fs *c = sb->s_fs_info;
+ struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
+ unsigned shift = sb->s_blocksize_bits - 9;
+	/*
+	 * This assumes inodes take up 64 bytes, which is a decent average:
+	 * with 512-byte sectors, free space / 64 bytes per inode works out
+	 * to free sectors << 3:
+	 */
+ u64 avail_inodes = ((usage.capacity - usage.used) << 3);
+ u64 fsid;
+
+ buf->f_type = BCACHEFS_STATFS_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+ buf->f_blocks = usage.capacity >> shift;
+ buf->f_bfree = usage.free >> shift;
+ buf->f_bavail = avail_factor(usage.free) >> shift;
+
+ buf->f_files = usage.nr_inodes + avail_inodes;
+ buf->f_ffree = avail_inodes;
+
+ fsid = le64_to_cpup((void *) c->sb.user_uuid.b) ^
+ le64_to_cpup((void *) c->sb.user_uuid.b + sizeof(u64));
+ buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
+ buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
+ buf->f_namelen = BCH_NAME_MAX;
+
+ return 0;
+}
+
+static int bch2_sync_fs(struct super_block *sb, int wait)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ int ret;
+
+ if (c->opts.journal_flush_disabled)
+ return 0;
+
+ if (!wait) {
+ bch2_journal_flush_async(&c->journal, NULL);
+ return 0;
+ }
+
+ ret = bch2_journal_flush(&c->journal);
+ return bch2_err_class(ret);
+}
+
+static struct bch_fs *bch2_path_to_fs(const char *path)
+{
+ struct bch_fs *c;
+ dev_t dev;
+ int ret;
+
+ ret = lookup_bdev(path, &dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ c = bch2_dev_to_fs(dev);
+ if (c)
+ closure_put(&c->cl);
+ return c ?: ERR_PTR(-ENOENT);
+}
+
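+/*
+ * Multi-device filesystems are mounted by passing all member devices in one
+ * colon-separated string, e.g. (hypothetical device names):
+ *
+ *	mount -t bcachefs /dev/sda:/dev/sdb /mnt
+ *
+ * split_devs() turns that string into a NULL-terminated array of paths:
+ */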
+static char **split_devs(const char *_dev_name, unsigned *nr)
+{
+ char *dev_name = NULL, **devs = NULL, *s;
+ size_t i = 0, nr_devs = 0;
+
+ dev_name = kstrdup(_dev_name, GFP_KERNEL);
+ if (!dev_name)
+ return NULL;
+
+ for (s = dev_name; s; s = strchr(s + 1, ':'))
+ nr_devs++;
+
+ devs = kcalloc(nr_devs + 1, sizeof(const char *), GFP_KERNEL);
+ if (!devs) {
+ kfree(dev_name);
+ return NULL;
+ }
+
+ while ((s = strsep(&dev_name, ":")))
+ devs[i++] = s;
+
+ *nr = nr_devs;
+ return devs;
+}
+
+static int bch2_remount(struct super_block *sb, int *flags, char *data)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct bch_opts opts = bch2_opts_empty();
+ int ret;
+
+ opt_set(opts, read_only, (*flags & SB_RDONLY) != 0);
+
+ ret = bch2_parse_mount_opts(c, &opts, data);
+ if (ret)
+ goto err;
+
+ if (opts.read_only != c->opts.read_only) {
+ down_write(&c->state_lock);
+
+ if (opts.read_only) {
+ bch2_fs_read_only(c);
+
+ sb->s_flags |= SB_RDONLY;
+ } else {
+ ret = bch2_fs_read_write(c);
+ if (ret) {
+ bch_err(c, "error going rw: %i", ret);
+ up_write(&c->state_lock);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sb->s_flags &= ~SB_RDONLY;
+ }
+
+ c->opts.read_only = opts.read_only;
+
+ up_write(&c->state_lock);
+ }
+
+ if (opt_defined(opts, errors))
+ c->opts.errors = opts.errors;
+err:
+ return bch2_err_class(ret);
+}
+
+static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
+{
+ struct bch_fs *c = root->d_sb->s_fs_info;
+ struct bch_dev *ca;
+ unsigned i;
+ bool first = true;
+
+ for_each_online_member(ca, c, i) {
+ if (!first)
+ seq_putc(seq, ':');
+ first = false;
+ seq_puts(seq, "/dev/");
+ seq_puts(seq, ca->name);
+ }
+
+ return 0;
+}
+
+static int bch2_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct bch_fs *c = root->d_sb->s_fs_info;
+ enum bch_opt_id i;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ for (i = 0; i < bch2_opts_nr; i++) {
+ const struct bch_option *opt = &bch2_opt_table[i];
+ u64 v = bch2_opt_get_by_id(&c->opts, i);
+
+ if (!(opt->flags & OPT_MOUNT))
+ continue;
+
+ if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
+ continue;
+
+ printbuf_reset(&buf);
+ bch2_opt_to_text(&buf, c, c->disk_sb.sb, opt, v,
+ OPT_SHOW_MOUNT_STYLE);
+ seq_putc(seq, ',');
+ seq_puts(seq, buf.buf);
+ }
+
+ if (buf.allocation_failure)
+ ret = -ENOMEM;
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static void bch2_put_super(struct super_block *sb)
+{
+ struct bch_fs *c = sb->s_fs_info;
+
+ __bch2_fs_stop(c);
+}
+
+/*
+ * bcachefs doesn't currently integrate with the VFS's intwrite freeze
+ * protection, but the internal write references serve the same purpose.
+ * Therefore reuse the
+ * read-only transition code to perform the quiesce. The caveat is that we don't
+ * currently have the ability to block tasks that want a write reference while
+ * the superblock is frozen. This is fine for now, but we should either add
+ * blocking support or find a way to integrate sb_start_intwrite() and friends.
+ */
+static int bch2_freeze(struct super_block *sb)
+{
+ struct bch_fs *c = sb->s_fs_info;
+
+ down_write(&c->state_lock);
+ bch2_fs_read_only(c);
+ up_write(&c->state_lock);
+ return 0;
+}
+
+static int bch2_unfreeze(struct super_block *sb)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ int ret;
+
+ down_write(&c->state_lock);
+ ret = bch2_fs_read_write(c);
+ up_write(&c->state_lock);
+ return ret;
+}
+
+static const struct super_operations bch_super_operations = {
+ .alloc_inode = bch2_alloc_inode,
+ .destroy_inode = bch2_destroy_inode,
+ .write_inode = bch2_vfs_write_inode,
+ .evict_inode = bch2_evict_inode,
+ .sync_fs = bch2_sync_fs,
+ .statfs = bch2_statfs,
+ .show_devname = bch2_show_devname,
+ .show_options = bch2_show_options,
+ .remount_fs = bch2_remount,
+ .put_super = bch2_put_super,
+ .freeze_fs = bch2_freeze,
+ .unfreeze_fs = bch2_unfreeze,
+};
+
+static int bch2_set_super(struct super_block *s, void *data)
+{
+ s->s_fs_info = data;
+ return 0;
+}
+
+static int bch2_noset_super(struct super_block *s, void *data)
+{
+ return -EBUSY;
+}
+
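+/*
+ * sget() match callback: @data is the array of bch_fs pointers that
+ * bch2_mount() resolved from the user's device list, so an existing
+ * superblock matches only if every named device resolved to that same
+ * filesystem:
+ */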
+static int bch2_test_super(struct super_block *s, void *data)
+{
+ struct bch_fs *c = s->s_fs_info;
+ struct bch_fs **devs = data;
+ unsigned i;
+
+ if (!c)
+ return false;
+
+ for (i = 0; devs[i]; i++)
+ if (c != devs[i])
+ return false;
+ return true;
+}
+
+static struct dentry *bch2_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ struct bch_fs *c;
+ struct bch_dev *ca;
+ struct super_block *sb;
+ struct inode *vinode;
+ struct bch_opts opts = bch2_opts_empty();
+ char **devs;
+ struct bch_fs **devs_to_fs = NULL;
+ unsigned i, nr_devs;
+ int ret;
+
+ opt_set(opts, read_only, (flags & SB_RDONLY) != 0);
+
+ ret = bch2_parse_mount_opts(NULL, &opts, data);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (!dev_name || strlen(dev_name) == 0)
+ return ERR_PTR(-EINVAL);
+
+ devs = split_devs(dev_name, &nr_devs);
+ if (!devs)
+ return ERR_PTR(-ENOMEM);
+
+ devs_to_fs = kcalloc(nr_devs + 1, sizeof(void *), GFP_KERNEL);
+ if (!devs_to_fs) {
+ sb = ERR_PTR(-ENOMEM);
+ goto got_sb;
+ }
+
+ for (i = 0; i < nr_devs; i++)
+ devs_to_fs[i] = bch2_path_to_fs(devs[i]);
+
+ sb = sget(fs_type, bch2_test_super, bch2_noset_super,
+ flags|SB_NOSEC, devs_to_fs);
+ if (!IS_ERR(sb))
+ goto got_sb;
+
+ c = bch2_fs_open(devs, nr_devs, opts);
+ if (IS_ERR(c)) {
+ sb = ERR_CAST(c);
+ goto got_sb;
+ }
+
+ /* Some options can't be parsed until after the fs is started: */
+ ret = bch2_parse_mount_opts(c, &opts, data);
+ if (ret) {
+ bch2_fs_stop(c);
+ sb = ERR_PTR(ret);
+ goto got_sb;
+ }
+
+ bch2_opts_apply(&c->opts, opts);
+
+ sb = sget(fs_type, NULL, bch2_set_super, flags|SB_NOSEC, c);
+ if (IS_ERR(sb))
+ bch2_fs_stop(c);
+got_sb:
+ kfree(devs_to_fs);
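+	/*
+	 * split_devs() carved the individual paths out of one kstrdup()ed
+	 * buffer with strsep(); devs[0] always points at the start of that
+	 * buffer, so freeing it releases all the strings:
+	 */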
+ kfree(devs[0]);
+ kfree(devs);
+
+ if (IS_ERR(sb)) {
+ ret = PTR_ERR(sb);
+ ret = bch2_err_class(ret);
+ return ERR_PTR(ret);
+ }
+
+ c = sb->s_fs_info;
+
+ if (sb->s_root) {
+ if ((flags ^ sb->s_flags) & SB_RDONLY) {
+ ret = -EBUSY;
+ goto err_put_super;
+ }
+ goto out;
+ }
+
+ sb->s_blocksize = block_bytes(c);
+ sb->s_blocksize_bits = ilog2(block_bytes(c));
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_op = &bch_super_operations;
+ sb->s_export_op = &bch_export_ops;
+#ifdef CONFIG_BCACHEFS_QUOTA
+ sb->s_qcop = &bch2_quotactl_operations;
+ sb->s_quota_types = QTYPE_MASK_USR|QTYPE_MASK_GRP|QTYPE_MASK_PRJ;
+#endif
+ sb->s_xattr = bch2_xattr_handlers;
+ sb->s_magic = BCACHEFS_STATFS_MAGIC;
+ sb->s_time_gran = c->sb.nsec_per_time_unit;
+ sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
+ sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
+ c->vfs_sb = sb;
+ strscpy(sb->s_id, c->name, sizeof(sb->s_id));
+
+ ret = super_setup_bdi(sb);
+ if (ret)
+ goto err_put_super;
+
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
+
+ for_each_online_member(ca, c, i) {
+ struct block_device *bdev = ca->disk_sb.bdev;
+
+ /* XXX: create an anonymous device for multi device filesystems */
+ sb->s_bdev = bdev;
+ sb->s_dev = bdev->bd_dev;
+ percpu_ref_put(&ca->io_ref);
+ break;
+ }
+
+ c->dev = sb->s_dev;
+
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ if (c->opts.acl)
+ sb->s_flags |= SB_POSIXACL;
+#endif
+
+ sb->s_shrink.seeks = 0;
+
+ vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
+ ret = PTR_ERR_OR_ZERO(vinode);
+ if (ret) {
+ bch_err_msg(c, ret, "mounting: error getting root inode");
+ goto err_put_super;
+ }
+
+ sb->s_root = d_make_root(vinode);
+ if (!sb->s_root) {
+ bch_err(c, "error mounting: error allocating root dentry");
+ ret = -ENOMEM;
+ goto err_put_super;
+ }
+
+ sb->s_flags |= SB_ACTIVE;
+out:
+ return dget(sb->s_root);
+
+err_put_super:
+ sb->s_fs_info = NULL;
+ c->vfs_sb = NULL;
+ deactivate_locked_super(sb);
+ bch2_fs_stop(c);
+ return ERR_PTR(bch2_err_class(ret));
+}
+
+static void bch2_kill_sb(struct super_block *sb)
+{
+ struct bch_fs *c = sb->s_fs_info;
+
+ if (c)
+ c->vfs_sb = NULL;
+ generic_shutdown_super(sb);
+ if (c)
+ bch2_fs_free(c);
+}
+
+static struct file_system_type bcache_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "bcachefs",
+ .mount = bch2_mount,
+ .kill_sb = bch2_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+
+MODULE_ALIAS_FS("bcachefs");
+
+void bch2_vfs_exit(void)
+{
+ unregister_filesystem(&bcache_fs_type);
+ kmem_cache_destroy(bch2_inode_cache);
+}
+
+int __init bch2_vfs_init(void)
+{
+ int ret = -ENOMEM;
+
+ bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
+ if (!bch2_inode_cache)
+ goto err;
+
+ ret = register_filesystem(&bcache_fs_type);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ bch2_vfs_exit();
+ return ret;
+}
+
+#endif /* NO_BCACHEFS_FS */
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
new file mode 100644
index 000000000000..5edf1d4b9e6b
--- /dev/null
+++ b/fs/bcachefs/fs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FS_H
+#define _BCACHEFS_FS_H
+
+#include "inode.h"
+#include "opts.h"
+#include "str_hash.h"
+#include "quota_types.h"
+#include "two_state_shared_lock.h"
+
+#include <linux/seqlock.h>
+#include <linux/stat.h>
+
+struct bch_inode_info {
+ struct inode v;
+ struct list_head ei_vfs_inode_list;
+ unsigned long ei_flags;
+
+ struct mutex ei_update_lock;
+ u64 ei_quota_reserved;
+ unsigned long ei_last_dirtied;
+ two_state_lock_t ei_pagecache_lock;
+
+ struct mutex ei_quota_lock;
+ struct bch_qid ei_qid;
+
+ u32 ei_subvol;
+
+ /*
+	 * When we've been doing nocow writes we'll need to issue flushes to
+	 * the underlying block devices.
+	 *
+	 * XXX: a device may have had a flush issued by some other codepath. It
+	 * would be better to keep for each device a sequence number that's
+	 * incremented when we issue a cache flush, and track here the sequence
+ * number that needs flushing.
+ */
+ struct bch_devs_mask ei_devs_need_flush;
+
+ /* copy of inode in btree: */
+ struct bch_inode_unpacked ei_inode;
+};
+
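+/*
+ * ei_pagecache_lock is a two state shared lock: any number of threads may
+ * hold it in the "add" state (0) or in the "block" state (1), but not both
+ * at once. A sketch of the intended usage, assumed from context rather than
+ * spelled out in this patch: buffered IO takes "add" while inserting pages
+ * into the pagecache, and operations like truncate and fallocate take
+ * "block" to fence off new pages:
+ */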
+#define bch2_pagecache_add_put(i) bch2_two_state_unlock(&i->ei_pagecache_lock, 0)
+#define bch2_pagecache_add_tryget(i) bch2_two_state_trylock(&i->ei_pagecache_lock, 0)
+#define bch2_pagecache_add_get(i) bch2_two_state_lock(&i->ei_pagecache_lock, 0)
+
+#define bch2_pagecache_block_put(i) bch2_two_state_unlock(&i->ei_pagecache_lock, 1)
+#define bch2_pagecache_block_get(i) bch2_two_state_lock(&i->ei_pagecache_lock, 1)
+
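+/*
+ * Btree operations address files by subvol_inum - (subvolume, inode number) -
+ * rather than by a bare inode number; e.g. bch2_lookup() in fs.c does:
+ *
+ *	ret = bch2_dirent_lookup(c, inode_inum(dir), &hash,
+ *				 &dentry->d_name, &inum);
+ */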
+static inline subvol_inum inode_inum(struct bch_inode_info *inode)
+{
+ return (subvol_inum) {
+ .subvol = inode->ei_subvol,
+ .inum = inode->ei_inode.bi_inum,
+ };
+}
+
+/*
+ * Set if we've gotten a btree error for this inode, and thus the vfs inode and
+ * btree inode may be inconsistent:
+ */
+#define EI_INODE_ERROR 0
+
+/*
+ * Set if the inode is in a snapshot subvolume - we don't do quota accounting
+ * in those:
+ */
+#define EI_INODE_SNAPSHOT 1
+
+#define to_bch_ei(_inode) \
+ container_of_or_null(_inode, struct bch_inode_info, v)
+
+static inline int ptrcmp(void *l, void *r)
+{
+ return cmp_int(l, r);
+}
+
+enum bch_inode_lock_op {
+ INODE_LOCK = (1U << 0),
+ INODE_PAGECACHE_BLOCK = (1U << 1),
+ INODE_UPDATE_LOCK = (1U << 2),
+};
+
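+/*
+ * Take the requested locks on several (possibly aliasing, possibly NULL)
+ * inodes in a consistent order: the macro sorts the arguments by pointer and
+ * skips duplicates, so e.g. bch2_rename2() can safely do
+ *
+ *	bch2_lock_inodes(INODE_UPDATE_LOCK,
+ *			 src_dir, dst_dir, src_inode, dst_inode);
+ *
+ * even when src_dir == dst_dir or dst_inode is NULL:
+ */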
+#define bch2_lock_inodes(_locks, ...) \
+do { \
+ struct bch_inode_info *a[] = { NULL, __VA_ARGS__ }; \
+ unsigned i; \
+ \
+ bubble_sort(&a[1], ARRAY_SIZE(a) - 1, ptrcmp); \
+ \
+ for (i = 1; i < ARRAY_SIZE(a); i++) \
+ if (a[i] != a[i - 1]) { \
+ if ((_locks) & INODE_LOCK) \
+ down_write_nested(&a[i]->v.i_rwsem, i); \
+ if ((_locks) & INODE_PAGECACHE_BLOCK) \
+ bch2_pagecache_block_get(a[i]);\
+ if ((_locks) & INODE_UPDATE_LOCK) \
+ mutex_lock_nested(&a[i]->ei_update_lock, i);\
+ } \
+} while (0)
+
+#define bch2_unlock_inodes(_locks, ...) \
+do { \
+ struct bch_inode_info *a[] = { NULL, __VA_ARGS__ }; \
+ unsigned i; \
+ \
+ bubble_sort(&a[1], ARRAY_SIZE(a) - 1, ptrcmp); \
+ \
+ for (i = 1; i < ARRAY_SIZE(a); i++) \
+ if (a[i] != a[i - 1]) { \
+ if ((_locks) & INODE_LOCK) \
+ up_write(&a[i]->v.i_rwsem); \
+ if ((_locks) & INODE_PAGECACHE_BLOCK) \
+ bch2_pagecache_block_put(a[i]);\
+ if ((_locks) & INODE_UPDATE_LOCK) \
+ mutex_unlock(&a[i]->ei_update_lock); \
+ } \
+} while (0)
+
+static inline struct bch_inode_info *file_bch_inode(struct file *file)
+{
+ return to_bch_ei(file_inode(file));
+}
+
+static inline bool inode_attr_changing(struct bch_inode_info *dir,
+ struct bch_inode_info *inode,
+ enum inode_opt_id id)
+{
+ return !(inode->ei_inode.bi_fields_set & (1 << id)) &&
+ bch2_inode_opt_get(&dir->ei_inode, id) !=
+ bch2_inode_opt_get(&inode->ei_inode, id);
+}
+
+static inline bool inode_attrs_changing(struct bch_inode_info *dir,
+ struct bch_inode_info *inode)
+{
+ unsigned id;
+
+ for (id = 0; id < Inode_opt_nr; id++)
+ if (inode_attr_changing(dir, inode, id))
+ return true;
+
+ return false;
+}
+
+struct bch_inode_unpacked;
+
+#ifndef NO_BCACHEFS_FS
+
+struct bch_inode_info *
+__bch2_create(struct mnt_idmap *, struct bch_inode_info *,
+ struct dentry *, umode_t, dev_t, subvol_inum, unsigned);
+
+int bch2_fs_quota_transfer(struct bch_fs *,
+ struct bch_inode_info *,
+ struct bch_qid,
+ unsigned,
+ enum quota_acct_mode);
+
+static inline int bch2_set_projid(struct bch_fs *c,
+ struct bch_inode_info *inode,
+ u32 projid)
+{
+ struct bch_qid qid = inode->ei_qid;
+
+ qid.q[QTYP_PRJ] = projid;
+
+ return bch2_fs_quota_transfer(c, inode, qid,
+ 1 << QTYP_PRJ,
+ KEY_TYPE_QUOTA_PREALLOC);
+}
+
+struct inode *bch2_vfs_inode_get(struct bch_fs *, subvol_inum);
+
+/* returns 0 if we want to do the update; otherwise the error is passed up */
+typedef int (*inode_set_fn)(struct btree_trans *,
+ struct bch_inode_info *,
+ struct bch_inode_unpacked *, void *);
+
+void bch2_inode_update_after_write(struct btree_trans *,
+ struct bch_inode_info *,
+ struct bch_inode_unpacked *,
+ unsigned);
+int __must_check bch2_write_inode(struct bch_fs *, struct bch_inode_info *,
+ inode_set_fn, void *, unsigned);
+
+int bch2_setattr_nonsize(struct mnt_idmap *,
+ struct bch_inode_info *,
+ struct iattr *);
+int __bch2_unlink(struct inode *, struct dentry *, bool);
+
+void bch2_evict_subvolume_inodes(struct bch_fs *, snapshot_id_list *);
+
+void bch2_vfs_exit(void);
+int bch2_vfs_init(void);
+
+#else
+
+#define bch2_inode_update_after_write(_trans, _inode, _inode_u, _fields) ({ do {} while (0); })
+
+static inline void bch2_evict_subvolume_inodes(struct bch_fs *c,
+ snapshot_id_list *s) {}
+static inline void bch2_vfs_exit(void) {}
+static inline int bch2_vfs_init(void) { return 0; }
+
+#endif /* NO_BCACHEFS_FS */
+
+#endif /* _BCACHEFS_FS_H */
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
new file mode 100644
index 000000000000..b8f9e7475dc5
--- /dev/null
+++ b/fs/bcachefs/fsck.c
@@ -0,0 +1,2417 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "darray.h"
+#include "dirent.h"
+#include "error.h"
+#include "fs-common.h"
+#include "fsck.h"
+#include "inode.h"
+#include "keylist.h"
+#include "recovery.h"
+#include "snapshot.h"
+#include "super.h"
+#include "xattr.h"
+
+#include <linux/bsearch.h>
+#include <linux/dcache.h> /* struct qstr */
+
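+/*
+ * Build a struct qstr from a C string, filling in only the length; the hash
+ * field is left zero, which is fine here since these names are only consumed
+ * via ->name and ->len:
+ */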
+#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
+
+/*
+ * XXX: this handles transaction restarts without returning
+ * -BCH_ERR_transaction_restart_nested; that's not how we do things anymore:
+ */
+static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
+ u32 snapshot)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 sectors = 0;
+ int ret;
+
+ for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
+ SPOS(inum, 0, snapshot),
+ POS(inum, U64_MAX),
+ 0, k, ret)
+ if (bkey_extent_is_allocation(k.k))
+ sectors += k.k->size;
+
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret ?: sectors;
+}
+
+static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
+ u32 snapshot)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_dirent d;
+ u64 subdirs = 0;
+ int ret;
+
+ for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
+ SPOS(inum, 0, snapshot),
+ POS(inum, U64_MAX),
+ 0, k, ret) {
+ if (k.k->type != KEY_TYPE_dirent)
+ continue;
+
+ d = bkey_s_c_to_dirent(k);
+ if (d.v->d_type == DT_DIR)
+ subdirs++;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret ?: subdirs;
+}
+
+static int __snapshot_lookup_subvol(struct btree_trans *trans, u32 snapshot,
+ u32 *subvol)
+{
+ struct bch_snapshot s;
+ int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
+ POS(0, snapshot), 0,
+ snapshot, &s);
+ if (!ret)
+ *subvol = le32_to_cpu(s.subvol);
+ else if (bch2_err_matches(ret, ENOENT))
+ bch_err(trans->c, "snapshot %u not found", snapshot);
+	return ret;
+}
+
+static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
+ u32 *snapshot, u64 *inum)
+{
+ struct bch_subvolume s;
+ int ret;
+
+ ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
+
+ *snapshot = le32_to_cpu(s.snapshot);
+ *inum = le64_to_cpu(s.inode);
+ return ret;
+}
+
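+/*
+ * The wrappers below run their __ counterparts inside lockrestart_do(), which
+ * retries the expression whenever it returns a transaction restart error, so
+ * callers only ever see "real" errors:
+ */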
+static int subvol_lookup(struct btree_trans *trans, u32 subvol,
+ u32 *snapshot, u64 *inum)
+{
+ return lockrestart_do(trans, __subvol_lookup(trans, subvol, snapshot, inum));
+}
+
+static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
+ struct bch_inode_unpacked *inode)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
+ POS(0, inode_nr),
+ BTREE_ITER_ALL_SNAPSHOTS);
+ k = bch2_btree_iter_peek(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
+ ret = -BCH_ERR_ENOENT_inode;
+ goto err;
+ }
+
+ ret = bch2_inode_unpack(k, inode);
+err:
+ bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int __lookup_inode(struct btree_trans *trans, u64 inode_nr,
+ struct bch_inode_unpacked *inode,
+ u32 *snapshot)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, inode_nr, *snapshot), 0);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ ret = bkey_is_inode(k.k)
+ ? bch2_inode_unpack(k, inode)
+ : -BCH_ERR_ENOENT_inode;
+ if (!ret)
+ *snapshot = iter.pos.snapshot;
+err:
+ bch_err_msg(trans->c, ret, "fetching inode %llu:%u", inode_nr, *snapshot);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
+ struct bch_inode_unpacked *inode,
+ u32 *snapshot)
+{
+ return lockrestart_do(trans, __lookup_inode(trans, inode_nr, inode, snapshot));
+}
+
+static int __lookup_dirent(struct btree_trans *trans,
+ struct bch_hash_info hash_info,
+ subvol_inum dir, struct qstr *name,
+ u64 *target, unsigned *type)
+{
+ struct btree_iter iter;
+ struct bkey_s_c_dirent d;
+ int ret;
+
+ ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
+ &hash_info, dir, name, 0);
+ if (ret)
+ return ret;
+
+ d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
+ *target = le64_to_cpu(d.v->d_inum);
+ *type = d.v->d_type;
+ bch2_trans_iter_exit(trans, &iter);
+ return 0;
+}
+
+static int __write_inode(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode,
+ u32 snapshot)
+{
+ struct bkey_inode_buf *inode_p =
+ bch2_trans_kmalloc(trans, sizeof(*inode_p));
+
+ if (IS_ERR(inode_p))
+ return PTR_ERR(inode_p);
+
+ bch2_inode_pack(inode_p, inode);
+ inode_p->inode.k.p.snapshot = snapshot;
+
+ return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
+ &inode_p->inode.k_i,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+}
+
+static int fsck_write_inode(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode,
+ u32 snapshot)
+{
+ int ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ __write_inode(trans, inode, snapshot));
+ if (ret)
+ bch_err_fn(trans->c, ret);
+ return ret;
+}
+
+static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bch_inode_unpacked dir_inode;
+ struct bch_hash_info dir_hash_info;
+ int ret;
+
+ ret = lookup_first_inode(trans, pos.inode, &dir_inode);
+ if (ret)
+ goto err;
+
+ dir_hash_info = bch2_hash_info_init(c, &dir_inode);
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
+
+ ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
+ &dir_hash_info, &iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/* Get lost+found, create if it doesn't exist: */
+static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
+ struct bch_inode_unpacked *lostfound)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked root;
+ struct bch_hash_info root_hash_info;
+ struct qstr lostfound_str = QSTR("lost+found");
+ subvol_inum root_inum = { .subvol = subvol };
+ u64 inum = 0;
+ unsigned d_type = 0;
+ u32 snapshot;
+ int ret;
+
+ ret = __subvol_lookup(trans, subvol, &snapshot, &root_inum.inum);
+ if (ret)
+ return ret;
+
+ ret = __lookup_inode(trans, root_inum.inum, &root, &snapshot);
+ if (ret)
+ return ret;
+
+ root_hash_info = bch2_hash_info_init(c, &root);
+
+ ret = __lookup_dirent(trans, root_hash_info, root_inum,
+ &lostfound_str, &inum, &d_type);
+ if (bch2_err_matches(ret, ENOENT)) {
+ bch_notice(c, "creating lost+found");
+ goto create_lostfound;
+ }
+
+ bch_err_fn(c, ret);
+ if (ret)
+ return ret;
+
+ if (d_type != DT_DIR) {
+ bch_err(c, "error looking up lost+found: not a directory");
+ return -BCH_ERR_ENOENT_not_directory;
+ }
+
+ /*
+	 * The bch2_check_dirents pass has already run; dangling dirents
+ * shouldn't exist here:
+ */
+ return __lookup_inode(trans, inum, lostfound, &snapshot);
+
+create_lostfound:
+ bch2_inode_init_early(c, lostfound);
+
+ ret = bch2_create_trans(trans, root_inum, &root,
+ lostfound, &lostfound_str,
+ 0, 0, S_IFDIR|0700, 0, NULL, NULL,
+ (subvol_inum) { }, 0);
+ bch_err_msg(c, ret, "creating lost+found");
+ return ret;
+}
+
+static int __reattach_inode(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode,
+ u32 inode_snapshot)
+{
+ struct bch_hash_info dir_hash;
+ struct bch_inode_unpacked lostfound;
+ char name_buf[20];
+ struct qstr name;
+ u64 dir_offset = 0;
+ u32 subvol;
+ int ret;
+
+ ret = __snapshot_lookup_subvol(trans, inode_snapshot, &subvol);
+ if (ret)
+ return ret;
+
+ ret = lookup_lostfound(trans, subvol, &lostfound);
+ if (ret)
+ return ret;
+
+ if (S_ISDIR(inode->bi_mode)) {
+ lostfound.bi_nlink++;
+
+ ret = __write_inode(trans, &lostfound, U32_MAX);
+ if (ret)
+ return ret;
+ }
+
+ dir_hash = bch2_hash_info_init(trans->c, &lostfound);
+
+ snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
+ name = (struct qstr) QSTR(name_buf);
+
+ ret = bch2_dirent_create(trans,
+ (subvol_inum) {
+ .subvol = subvol,
+ .inum = lostfound.bi_inum,
+ },
+ &dir_hash,
+ inode_d_type(inode),
+ &name, inode->bi_inum, &dir_offset,
+ BCH_HASH_SET_MUST_CREATE);
+ if (ret)
+ return ret;
+
+ inode->bi_dir = lostfound.bi_inum;
+ inode->bi_dir_offset = dir_offset;
+
+ return __write_inode(trans, inode, inode_snapshot);
+}
+
+static int reattach_inode(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode,
+ u32 inode_snapshot)
+{
+ int ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL,
+ __reattach_inode(trans, inode, inode_snapshot));
+ bch_err_msg(trans->c, ret, "reattaching inode %llu", inode->bi_inum);
+ return ret;
+}
+
+static int remove_backpointer(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode)
+{
+ struct btree_iter iter;
+ struct bkey_s_c_dirent d;
+ int ret;
+
+ d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
+ POS(inode->bi_dir, inode->bi_dir_offset), 0,
+ dirent);
+ ret = bkey_err(d) ?:
+ __remove_dirent(trans, d.k->p);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+struct snapshots_seen_entry {
+ u32 id;
+ u32 equiv;
+};
+
+struct snapshots_seen {
+ struct bpos pos;
+ DARRAY(struct snapshots_seen_entry) ids;
+};
+
+static inline void snapshots_seen_exit(struct snapshots_seen *s)
+{
+ darray_exit(&s->ids);
+}
+
+static inline void snapshots_seen_init(struct snapshots_seen *s)
+{
+ memset(s, 0, sizeof(*s));
+}
+
+static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
+{
+ struct snapshots_seen_entry *i, n = {
+ .id = id,
+ .equiv = bch2_snapshot_equiv(c, id),
+ };
+ int ret = 0;
+
+ darray_for_each(s->ids, i) {
+ if (i->id == id)
+ return 0;
+ if (i->id > id)
+ break;
+ }
+
+ ret = darray_insert_item(&s->ids, i - s->ids.data, n);
+ if (ret)
+ bch_err(c, "error reallocating snapshots_seen table (size %zu)",
+ s->ids.size);
+ return ret;
+}
+
+static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
+ enum btree_id btree_id, struct bpos pos)
+{
+ struct snapshots_seen_entry *i, n = {
+ .id = pos.snapshot,
+ .equiv = bch2_snapshot_equiv(c, pos.snapshot),
+ };
+ int ret = 0;
+
+ if (!bkey_eq(s->pos, pos))
+ s->ids.nr = 0;
+
+ s->pos = pos;
+ s->pos.snapshot = n.equiv;
+
+ darray_for_each(s->ids, i) {
+ if (i->id == n.id)
+ return 0;
+
+ /*
+		 * We don't currently rigorously track whether snapshot cleanup
+		 * needs to be run, so it shouldn't be a fsck error yet:
+ */
+ if (i->equiv == n.equiv) {
+ bch_err(c, "snapshot deletion did not finish:\n"
+ " duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
+ bch2_btree_ids[btree_id],
+ pos.inode, pos.offset,
+ i->id, n.id, n.equiv);
+ return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
+ }
+ }
+
+ ret = darray_push(&s->ids, n);
+ if (ret)
+ bch_err(c, "error reallocating snapshots_seen table (size %zu)",
+ s->ids.size);
+ return ret;
+}
+
+/**
+ * key_visible_in_snapshot - returns true if @id is a descendant of @ancestor,
+ * and @ancestor hasn't been overwritten in @seen
+ *
+ * @c: filesystem handle
+ * @seen: list of snapshot ids already seen at current position
+ * @id:	descendant snapshot id
+ * @ancestor: ancestor snapshot id
+ *
+ * Returns: whether key in @ancestor snapshot is visible in @id snapshot
+ */
+static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
+ u32 id, u32 ancestor)
+{
+ ssize_t i;
+
+ EBUG_ON(id > ancestor);
+ EBUG_ON(!bch2_snapshot_is_equiv(c, id));
+ EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
+
+ /* @ancestor should be the snapshot most recently added to @seen */
+ EBUG_ON(ancestor != seen->pos.snapshot);
+ EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
+
+ if (id == ancestor)
+ return true;
+
+ if (!bch2_snapshot_is_ancestor(c, id, ancestor))
+ return false;
+
+ /*
+	 * We know that @id is a descendant of @ancestor, we're checking if
+	 * we've seen a key that overwrote @ancestor - i.e. also a descendant of
+	 * @ancestor and with @id as a descendant.
+ *
+ * But we already know that we're scanning IDs between @id and @ancestor
+ * numerically, since snapshot ID lists are kept sorted, so if we find
+ * an id that's an ancestor of @id we're done:
+ */
+
+ for (i = seen->ids.nr - 2;
+ i >= 0 && seen->ids.data[i].equiv >= id;
+ --i)
+ if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
+ return false;
+
+ return true;
+}
+
+/**
+ * ref_visible - given a key with snapshot id @src that points to a key with
+ * snapshot id @dst, test whether there is some snapshot in which @dst is
+ * visible.
+ *
+ * @c: filesystem handle
+ * @s: list of snapshot IDs already seen at @src
+ * @src: snapshot ID of src key
+ * @dst: snapshot ID of dst key
+ * Returns: true if there is some snapshot in which @dst is visible
+ *
+ * Assumes we're visiting @src keys in natural key order
+ */
+static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
+ u32 src, u32 dst)
+{
+ return dst <= src
+ ? key_visible_in_snapshot(c, s, dst, src)
+ : bch2_snapshot_is_ancestor(c, src, dst);
+}
+
+static int ref_visible2(struct bch_fs *c,
+ u32 src, struct snapshots_seen *src_seen,
+ u32 dst, struct snapshots_seen *dst_seen)
+{
+ src = bch2_snapshot_equiv(c, src);
+ dst = bch2_snapshot_equiv(c, dst);
+
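+	/* key_visible_in_snapshot() requires id <= ancestor: */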
+ if (dst > src) {
+ swap(dst, src);
+ swap(dst_seen, src_seen);
+ }
+ return key_visible_in_snapshot(c, src_seen, dst, src);
+}
+
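+/* Iterate over the inodes in @_w that are visible in snapshot @_snapshot: */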
+#define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \
+ for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \
+ (_i)->snapshot <= (_snapshot); _i++) \
+ if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
+
+struct inode_walker_entry {
+ struct bch_inode_unpacked inode;
+ u32 snapshot;
+ bool seen_this_pos;
+ u64 count;
+};
+
+struct inode_walker {
+ bool first_this_inode;
+ bool recalculate_sums;
+ struct bpos last_pos;
+
+ DARRAY(struct inode_walker_entry) inodes;
+};
+
+static void inode_walker_exit(struct inode_walker *w)
+{
+ darray_exit(&w->inodes);
+}
+
+static struct inode_walker inode_walker_init(void)
+{
+ return (struct inode_walker) { 0, };
+}
+
+static int add_inode(struct bch_fs *c, struct inode_walker *w,
+ struct bkey_s_c inode)
+{
+ struct bch_inode_unpacked u;
+
+ BUG_ON(bch2_inode_unpack(inode, &u));
+
+ return darray_push(&w->inodes, ((struct inode_walker_entry) {
+ .inode = u,
+ .snapshot = bch2_snapshot_equiv(c, inode.k->p.snapshot),
+ }));
+}
+
+static int get_inodes_all_snapshots(struct btree_trans *trans,
+ struct inode_walker *w, u64 inum)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u32 restart_count = trans->restart_count;
+ int ret;
+
+ w->recalculate_sums = false;
+ w->inodes.nr = 0;
+
+ for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ if (k.k->p.offset != inum)
+ break;
+
+		if (bkey_is_inode(k.k)) {
+			ret = add_inode(c, w, k);
+			if (ret)
+				break;
+		}
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ return ret;
+
+ w->first_this_inode = true;
+
+ return trans_was_restarted(trans, restart_count);
+}
+
+static struct inode_walker_entry *
+lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w,
+ u32 snapshot, bool is_whiteout)
+{
+ struct inode_walker_entry *i;
+
+ snapshot = bch2_snapshot_equiv(c, snapshot);
+
+ darray_for_each(w->inodes, i)
+ if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
+ goto found;
+
+ return NULL;
+found:
+ BUG_ON(snapshot > i->snapshot);
+
+ if (snapshot != i->snapshot && !is_whiteout) {
+ struct inode_walker_entry new = *i;
+ size_t pos;
+ int ret;
+
+ new.snapshot = snapshot;
+ new.count = 0;
+
+ bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
+ w->last_pos.inode, snapshot, i->snapshot);
+
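+		/* Find the insertion position that keeps the list sorted by snapshot: */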
+ while (i > w->inodes.data && i[-1].snapshot > snapshot)
+ --i;
+
+ pos = i - w->inodes.data;
+ ret = darray_insert_item(&w->inodes, pos, new);
+ if (ret)
+ return ERR_PTR(ret);
+
+ i = w->inodes.data + pos;
+ }
+
+ return i;
+}
+
+static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
+ struct inode_walker *w, struct bpos pos,
+ bool is_whiteout)
+{
+ if (w->last_pos.inode != pos.inode) {
+ int ret = get_inodes_all_snapshots(trans, w, pos.inode);
+ if (ret)
+ return ERR_PTR(ret);
+ } else if (bkey_cmp(w->last_pos, pos)) {
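+		/* Same inode, new position: reset per-position state */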
+ struct inode_walker_entry *i;
+
+ darray_for_each(w->inodes, i)
+ i->seen_this_pos = false;
+ }
+
+ w->last_pos = pos;
+
+ return lookup_inode_for_snapshot(trans->c, w, pos.snapshot, is_whiteout);
+}
+
+static int __get_visible_inodes(struct btree_trans *trans,
+ struct inode_walker *w,
+ struct snapshots_seen *s,
+ u64 inum)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ w->inodes.nr = 0;
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+
+ if (k.k->p.offset != inum)
+ break;
+
+ if (!ref_visible(c, s, s->pos.snapshot, equiv))
+ continue;
+
+		if (bkey_is_inode(k.k)) {
+			ret = add_inode(c, w, k);
+			if (ret)
+				break;
+		}
+
+ if (equiv >= s->pos.snapshot)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+static int check_key_has_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
+ "key in missing snapshot: %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
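+/*
+ * Move a hash table key to its correct slot: delete it at its current
+ * (wrong) position and reinsert it via the normal hash insertion path.
+ */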
+static int hash_redo_key(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ struct bch_hash_info *hash_info,
+ struct btree_iter *k_iter, struct bkey_s_c k)
+{
+ struct bkey_i *delete;
+ struct bkey_i *tmp;
+
+ delete = bch2_trans_kmalloc(trans, sizeof(*delete));
+ if (IS_ERR(delete))
+ return PTR_ERR(delete);
+
+ tmp = bch2_bkey_make_mut_noupdate(trans, k);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ bkey_init(&delete->k);
+ delete->k.p = k_iter->pos;
+ return bch2_btree_iter_traverse(k_iter) ?:
+ bch2_trans_update(trans, k_iter, delete, 0) ?:
+ bch2_hash_set_snapshot(trans, desc, hash_info,
+ (subvol_inum) { 0, k.k->p.inode },
+ k.k->p.snapshot, tmp,
+ BCH_HASH_SET_MUST_CREATE,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
+}
+
+static int hash_check_key(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ struct bch_hash_info *hash_info,
+ struct btree_iter *k_iter, struct bkey_s_c hash_k)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter = { NULL };
+ struct printbuf buf = PRINTBUF;
+ struct bkey_s_c k;
+ u64 hash;
+ int ret = 0;
+
+ if (hash_k.k->type != desc.key_type)
+ return 0;
+
+ hash = desc.hash_bkey(hash_info, hash_k);
+
+ if (likely(hash == hash_k.k->p.offset))
+ return 0;
+
+ if (hash_k.k->p.offset < hash)
+ goto bad_hash;
+
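+	/*
+	 * Scan forward from the slot this key hashed to, looking for the key
+	 * itself, a duplicate, or an empty slot (which means the key is
+	 * misplaced):
+	 */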
+ for_each_btree_key_norestart(trans, iter, desc.btree_id,
+ SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
+ BTREE_ITER_SLOTS, k, ret) {
+ if (bkey_eq(k.k->p, hash_k.k->p))
+ break;
+
+ if (fsck_err_on(k.k->type == desc.key_type &&
+ !desc.cmp_bkey(k, hash_k), c,
+ "duplicate hash table keys:\n%s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, hash_k),
+ buf.buf))) {
+ ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
+ break;
+ }
+
+ if (bkey_deleted(k.k)) {
+ bch2_trans_iter_exit(trans, &iter);
+ goto bad_hash;
+ }
+ }
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ret;
+bad_hash:
+ if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
+ bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
+ ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
+ bch_err_fn(c, ret);
+ if (ret)
+ return ret;
+ ret = -BCH_ERR_transaction_restart_nested;
+ }
+fsck_err:
+ goto out;
+}
+
+static int check_inode(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct bch_inode_unpacked *prev,
+ struct snapshots_seen *s,
+ bool full)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked u;
+ bool do_update = false;
+ int ret;
+
+ ret = check_key_has_snapshot(trans, iter, k);
+ if (ret < 0)
+ goto err;
+ if (ret)
+ return 0;
+
+ ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
+ if (ret)
+ goto err;
+
+ if (!bkey_is_inode(k.k))
+ return 0;
+
+ BUG_ON(bch2_inode_unpack(k, &u));
+
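+	/* In a non-full check, only inspect inodes flagged as needing work: */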
+ if (!full &&
+ !(u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|
+ BCH_INODE_I_SECTORS_DIRTY|
+ BCH_INODE_UNLINKED)))
+ return 0;
+
+ if (prev->bi_inum != u.bi_inum)
+ *prev = u;
+
+ if (fsck_err_on(prev->bi_hash_seed != u.bi_hash_seed ||
+ inode_d_type(prev) != inode_d_type(&u), c,
+ "inodes in different snapshots don't match")) {
+ bch_err(c, "repair not implemented yet");
+ return -EINVAL;
+ }
+
+ if ((u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED)) &&
+ bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
+ struct bpos new_min_pos;
+
+ ret = bch2_propagate_key_to_snapshot_leaves(trans, iter->btree_id, k, &new_min_pos);
+ if (ret)
+ goto err;
+
+		u.bi_flags &= ~(BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED);
+
+ ret = __write_inode(trans, &u, iter->pos.snapshot);
+ bch_err_msg(c, ret, "in fsck updating inode");
+ if (ret)
+ return ret;
+
+ if (!bpos_eq(new_min_pos, POS_MIN))
+ bch2_btree_iter_set_pos(iter, bpos_predecessor(new_min_pos));
+ return 0;
+ }
+
+ if (u.bi_flags & BCH_INODE_UNLINKED &&
+ (!c->sb.clean ||
+ fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
+ u.bi_inum))) {
+ bch2_trans_unlock(trans);
+ bch2_fs_lazy_rw(c);
+
+ ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
+ bch_err_msg(c, ret, "in fsck deleting inode");
+ return ret;
+ }
+
+ if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
+ (!c->sb.clean ||
+ fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
+ u.bi_inum))) {
+ bch_verbose(c, "truncating inode %llu", u.bi_inum);
+
+ bch2_trans_unlock(trans);
+ bch2_fs_lazy_rw(c);
+
+ /*
+ * XXX: need to truncate partial blocks too here - or ideally
+ * just switch units to bytes and that issue goes away
+ */
+ ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+ SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
+ iter->pos.snapshot),
+ POS(u.bi_inum, U64_MAX),
+ 0, NULL);
+ bch_err_msg(c, ret, "in fsck truncating inode");
+ if (ret)
+ return ret;
+
+ /*
+ * We truncated without our normal sector accounting hook, just
+ * make sure we recalculate it:
+ */
+ u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
+
+ u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
+ do_update = true;
+ }
+
+ if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
+ (!c->sb.clean ||
+ fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
+ u.bi_inum))) {
+ s64 sectors;
+
+ bch_verbose(c, "recounting sectors for inode %llu",
+ u.bi_inum);
+
+ sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
+ if (sectors < 0) {
+ bch_err_msg(c, sectors, "in fsck recounting inode sectors");
+ return sectors;
+ }
+
+ u.bi_sectors = sectors;
+ u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
+ do_update = true;
+ }
+
+ if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
+ u.bi_dir = 0;
+ u.bi_dir_offset = 0;
+ u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
+ do_update = true;
+ }
+
+ if (do_update) {
+ ret = __write_inode(trans, &u, iter->pos.snapshot);
+ bch_err_msg(c, ret, "in fsck updating inode");
+ if (ret)
+ return ret;
+ }
+err:
+fsck_err:
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+noinline_for_stack
+int bch2_check_inodes(struct bch_fs *c)
+{
+ bool full = c->opts.fsck;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bch_inode_unpacked prev = { 0 };
+ struct snapshots_seen s;
+ struct bkey_s_c k;
+ int ret;
+
+ snapshots_seen_init(&s);
+
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
+ POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_inode(trans, &iter, k, &prev, &s, full));
+
+ snapshots_seen_exit(&s);
+ bch2_trans_put(trans);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos pos)
+{
+ return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
+}
+
+static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
+ struct bkey_s_c_dirent d)
+{
+ return inode->bi_dir == d.k->p.inode &&
+ inode->bi_dir_offset == d.k->p.offset;
+}
+
+static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *inode)
+{
+ return d.v->d_type == DT_SUBVOL
+ ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
+ : le64_to_cpu(d.v->d_inum) == inode->bi_inum;
+}
+
+static int inode_backpointer_exists(struct btree_trans *trans,
+ struct bch_inode_unpacked *inode,
+ u32 snapshot)
+{
+ struct btree_iter iter;
+ struct bkey_s_c_dirent d;
+ int ret;
+
+ d = dirent_get_by_pos(trans, &iter,
+ SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot));
+ ret = bkey_err(d);
+ if (ret)
+ return bch2_err_matches(ret, ENOENT) ? 0 : ret;
+
+ ret = dirent_points_to_inode(d, inode);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
+{
+ struct bch_fs *c = trans->c;
+ struct inode_walker_entry *i;
+ u32 restart_count = trans->restart_count;
+ int ret = 0;
+ s64 count2;
+
+ darray_for_each(w->inodes, i) {
+ if (i->inode.bi_sectors == i->count)
+ continue;
+
+ count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
+
+ if (w->recalculate_sums)
+ i->count = count2;
+
+ if (i->count != count2) {
+ bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
+ w->last_pos.inode, i->snapshot, i->count, count2);
+ return -BCH_ERR_internal_fsck_err;
+ }
+
+ if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
+ "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
+ w->last_pos.inode, i->snapshot,
+ i->inode.bi_sectors, i->count)) {
+ i->inode.bi_sectors = i->count;
+ ret = fsck_write_inode(trans, &i->inode, i->snapshot);
+ if (ret)
+ break;
+ }
+ }
+fsck_err:
+ bch_err_fn(c, ret);
+ return ret ?: trans_was_restarted(trans, restart_count);
+}
+
+struct extent_end {
+ u32 snapshot;
+ u64 offset;
+ struct snapshots_seen seen;
+};
+
+struct extent_ends {
+ struct bpos last_pos;
+ DARRAY(struct extent_end) e;
+};
+
+static void extent_ends_reset(struct extent_ends *extent_ends)
+{
+ struct extent_end *i;
+
+ darray_for_each(extent_ends->e, i)
+ snapshots_seen_exit(&i->seen);
+
+ extent_ends->e.nr = 0;
+}
+
+static void extent_ends_exit(struct extent_ends *extent_ends)
+{
+ extent_ends_reset(extent_ends);
+ darray_exit(&extent_ends->e);
+}
+
+static void extent_ends_init(struct extent_ends *extent_ends)
+{
+ memset(extent_ends, 0, sizeof(*extent_ends));
+}
+
+static int extent_ends_at(struct bch_fs *c,
+ struct extent_ends *extent_ends,
+ struct snapshots_seen *seen,
+ struct bkey_s_c k)
+{
+ struct extent_end *i, n = (struct extent_end) {
+ .offset = k.k->p.offset,
+ .snapshot = k.k->p.snapshot,
+ .seen = *seen,
+ };
+
+ n.seen.ids.data = kmemdup(seen->ids.data,
+ sizeof(seen->ids.data[0]) * seen->ids.size,
+ GFP_KERNEL);
+ if (!n.seen.ids.data)
+ return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
+
+ darray_for_each(extent_ends->e, i) {
+ if (i->snapshot == k.k->p.snapshot) {
+ snapshots_seen_exit(&i->seen);
+ *i = n;
+ return 0;
+ }
+
+ if (i->snapshot >= k.k->p.snapshot)
+ break;
+ }
+
+ return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
+}
+
+static int overlapping_extents_found(struct btree_trans *trans,
+ enum btree_id btree,
+ struct bpos pos1, struct snapshots_seen *pos1_seen,
+ struct bkey pos2,
+ bool *fixed,
+ struct extent_end *extent_end)
+{
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+ struct btree_iter iter1, iter2 = { NULL };
+ struct bkey_s_c k1, k2;
+ int ret;
+
+ BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
+
+ bch2_trans_iter_init(trans, &iter1, btree, pos1,
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_NOT_EXTENTS);
+ k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
+ ret = bkey_err(k1);
+ if (ret)
+ goto err;
+
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k1);
+
+ if (!bpos_eq(pos1, k1.k->p)) {
+ prt_str(&buf, "\n wanted\n ");
+ bch2_bpos_to_text(&buf, pos1);
+ prt_str(&buf, "\n ");
+ bch2_bkey_to_text(&buf, &pos2);
+
+ bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
+ __func__, buf.buf);
+ ret = -BCH_ERR_internal_fsck_err;
+ goto err;
+ }
+
+ bch2_trans_copy_iter(&iter2, &iter1);
+
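+	/* Walk forward to the second extent, which ends at pos2: */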
+ while (1) {
+ bch2_btree_iter_advance(&iter2);
+
+ k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX));
+ ret = bkey_err(k2);
+ if (ret)
+ goto err;
+
+ if (bpos_ge(k2.k->p, pos2.p))
+ break;
+ }
+
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k2);
+
+ if (bpos_gt(k2.k->p, pos2.p) ||
+ pos2.size != k2.k->size) {
+ bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
+ __func__, buf.buf);
+ ret = -BCH_ERR_internal_fsck_err;
+ goto err;
+ }
+
+ prt_printf(&buf, "\n overwriting %s extent",
+ pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
+
+ if (fsck_err(c, "overlapping extents%s", buf.buf)) {
+ struct btree_iter *old_iter = &iter1;
+ struct disk_reservation res = { 0 };
+
+ if (pos1.snapshot < pos2.p.snapshot) {
+ old_iter = &iter2;
+ swap(k1, k2);
+ }
+
+ trans->extra_journal_res += bch2_bkey_sectors_compressed(k2);
+
+ ret = bch2_trans_update_extent_overwrite(trans, old_iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
+ k1, k2) ?:
+ bch2_trans_commit(trans, &res, NULL,
+ BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL);
+ bch2_disk_reservation_put(c, &res);
+
+ if (ret)
+ goto err;
+
+ *fixed = true;
+
+ if (pos1.snapshot == pos2.p.snapshot) {
+ /*
+ * We overwrote the first extent, and did the overwrite
+ * in the same snapshot:
+ */
+ extent_end->offset = bkey_start_offset(&pos2);
+ } else if (pos1.snapshot > pos2.p.snapshot) {
+ /*
+ * We overwrote the first extent in pos2's snapshot:
+ */
+ ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
+ } else {
+ /*
+ * We overwrote the second extent - restart
+ * check_extent() from the top:
+ */
+ ret = -BCH_ERR_transaction_restart_nested;
+ }
+ }
+fsck_err:
+err:
+ bch2_trans_iter_exit(trans, &iter2);
+ bch2_trans_iter_exit(trans, &iter1);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int check_overlapping_extents(struct btree_trans *trans,
+ struct snapshots_seen *seen,
+ struct extent_ends *extent_ends,
+ struct bkey_s_c k,
+ u32 equiv,
+ struct btree_iter *iter,
+ bool *fixed)
+{
+ struct bch_fs *c = trans->c;
+ struct extent_end *i;
+ int ret = 0;
+
+ /* transaction restart, running again */
+ if (bpos_eq(extent_ends->last_pos, k.k->p))
+ return 0;
+
+ if (extent_ends->last_pos.inode != k.k->p.inode)
+ extent_ends_reset(extent_ends);
+
+ darray_for_each(extent_ends->e, i) {
+ if (i->offset <= bkey_start_offset(k.k))
+ continue;
+
+ if (!ref_visible2(c,
+ k.k->p.snapshot, seen,
+ i->snapshot, &i->seen))
+ continue;
+
+ ret = overlapping_extents_found(trans, iter->btree_id,
+ SPOS(iter->pos.inode,
+ i->offset,
+ i->snapshot),
+ &i->seen,
+ *k.k, fixed, i);
+ if (ret)
+ goto err;
+ }
+
+ ret = extent_ends_at(c, extent_ends, seen, k);
+ if (ret)
+ goto err;
+
+ extent_ends->last_pos = k.k->p;
+err:
+ return ret;
+}
+
+static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct inode_walker *inode,
+ struct snapshots_seen *s,
+ struct extent_ends *extent_ends)
+{
+ struct bch_fs *c = trans->c;
+ struct inode_walker_entry *i;
+ struct printbuf buf = PRINTBUF;
+ struct bpos equiv = k.k->p;
+ int ret = 0;
+
+ equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
+
+ ret = check_key_has_snapshot(trans, iter, k);
+ if (ret) {
+ ret = ret < 0 ? ret : 0;
+ goto out;
+ }
+
+ if (inode->last_pos.inode != k.k->p.inode) {
+ ret = check_i_sectors(trans, inode);
+ if (ret)
+ goto err;
+ }
+
+ i = walk_inode(trans, inode, equiv, k.k->type == KEY_TYPE_whiteout);
+ ret = PTR_ERR_OR_ZERO(i);
+ if (ret)
+ goto err;
+
+ ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
+ if (ret)
+ goto err;
+
+ if (k.k->type != KEY_TYPE_whiteout) {
+ if (fsck_err_on(!i, c,
+ "extent in missing inode:\n %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ goto delete;
+
+ if (fsck_err_on(i &&
+ !S_ISREG(i->inode.bi_mode) &&
+ !S_ISLNK(i->inode.bi_mode), c,
+ "extent in non regular inode mode %o:\n %s",
+ i->inode.bi_mode,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ goto delete;
+
+ ret = check_overlapping_extents(trans, s, extent_ends, k,
+ equiv.snapshot, iter,
+ &inode->recalculate_sums);
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * Check inodes in reverse order, from oldest snapshots to newest,
+ * starting from the inode that matches this extent's snapshot. If we
+ * didn't have one, iterate over all inodes:
+ */
+ if (!i)
+ i = inode->inodes.data + inode->inodes.nr - 1;
+
+ for (;
+ inode->inodes.data && i >= inode->inodes.data;
+ --i) {
+ if (i->snapshot > equiv.snapshot ||
+ !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
+ continue;
+
+ if (k.k->type != KEY_TYPE_whiteout) {
+ if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+ k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
+ !bkey_extent_is_reservation(k), c,
+ "extent type past end of inode %llu:%u, i_size %llu\n %s",
+ i->inode.bi_inum, i->snapshot, i->inode.bi_size,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ struct btree_iter iter2;
+
+ bch2_trans_copy_iter(&iter2, iter);
+ bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
+ ret = bch2_btree_iter_traverse(&iter2) ?:
+ bch2_btree_delete_at(trans, &iter2,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_trans_iter_exit(trans, &iter2);
+ if (ret)
+ goto err;
+
+ iter->k.type = KEY_TYPE_whiteout;
+ }
+
+ if (bkey_extent_is_allocation(k.k))
+ i->count += k.k->size;
+ }
+
+ i->seen_this_pos = true;
+ }
+out:
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+delete:
+ ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ goto out;
+}
+
+/*
+ * Walk extents: verify that extents have a corresponding S_ISREG inode, and
+ * that i_size and i_sectors are consistent
+ */
+int bch2_check_extents(struct bch_fs *c)
+{
+ struct inode_walker w = inode_walker_init();
+ struct snapshots_seen s;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct extent_ends extent_ends;
+ struct disk_reservation res = { 0 };
+ int ret = 0;
+
+ snapshots_seen_init(&s);
+ extent_ends_init(&extent_ends);
+
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
+ POS(BCACHEFS_ROOT_INO, 0),
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ &res, NULL,
+ BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
+ bch2_disk_reservation_put(c, &res);
+ check_extent(trans, &iter, k, &w, &s, &extent_ends);
+ })) ?:
+ check_i_sectors(trans, &w);
+
+ bch2_disk_reservation_put(c, &res);
+ extent_ends_exit(&extent_ends);
+ inode_walker_exit(&w);
+ snapshots_seen_exit(&s);
+ bch2_trans_put(trans);
+
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
+{
+ struct bch_fs *c = trans->c;
+ struct inode_walker_entry *i;
+ u32 restart_count = trans->restart_count;
+ int ret = 0;
+ s64 count2;
+
+ darray_for_each(w->inodes, i) {
+ if (i->inode.bi_nlink == i->count)
+ continue;
+
+ count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
+ if (count2 < 0)
+ return count2;
+
+ if (i->count != count2) {
+ bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
+ i->count, count2);
+ i->count = count2;
+ if (i->inode.bi_nlink == i->count)
+ continue;
+ }
+
+ if (fsck_err_on(i->inode.bi_nlink != i->count, c,
+ "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
+ w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
+ i->inode.bi_nlink = i->count;
+ ret = fsck_write_inode(trans, &i->inode, i->snapshot);
+ if (ret)
+ break;
+ }
+ }
+fsck_err:
+ bch_err_fn(c, ret);
+ return ret ?: trans_was_restarted(trans, restart_count);
+}
+
+static int check_dirent_target(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *target,
+ u32 target_snapshot)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i_dirent *n;
+ bool backpointer_exists = true;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
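+	/* Inode has no backpointer yet - point it at this dirent: */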
+ if (!target->bi_dir &&
+ !target->bi_dir_offset) {
+ target->bi_dir = d.k->p.inode;
+ target->bi_dir_offset = d.k->p.offset;
+
+ ret = __write_inode(trans, target, target_snapshot);
+ if (ret)
+ goto err;
+ }
+
+ if (!inode_points_to_dirent(target, d)) {
+ ret = inode_backpointer_exists(trans, target, d.k->p.snapshot);
+ if (ret < 0)
+ goto err;
+
+ backpointer_exists = ret;
+ ret = 0;
+
+ if (fsck_err_on(S_ISDIR(target->bi_mode) &&
+ backpointer_exists, c,
+ "directory %llu with multiple links",
+ target->bi_inum)) {
+ ret = __remove_dirent(trans, d.k->p);
+ goto out;
+ }
+
+ if (fsck_err_on(backpointer_exists &&
+ !target->bi_nlink, c,
+ "inode %llu type %s has multiple links but i_nlink 0",
+ target->bi_inum, bch2_d_types[d.v->d_type])) {
+ target->bi_nlink++;
+ target->bi_flags &= ~BCH_INODE_UNLINKED;
+
+ ret = __write_inode(trans, target, target_snapshot);
+ if (ret)
+ goto err;
+ }
+
+ if (fsck_err_on(!backpointer_exists, c,
+ "inode %llu:%u has wrong backpointer:\n"
+ "got %llu:%llu\n"
+ "should be %llu:%llu",
+ target->bi_inum, target_snapshot,
+ target->bi_dir,
+ target->bi_dir_offset,
+ d.k->p.inode,
+ d.k->p.offset)) {
+ target->bi_dir = d.k->p.inode;
+ target->bi_dir_offset = d.k->p.offset;
+
+ ret = __write_inode(trans, target, target_snapshot);
+ if (ret)
+ goto err;
+ }
+ }
+
+ if (fsck_err_on(d.v->d_type != inode_d_type(target), c,
+ "incorrect d_type: got %s, should be %s:\n%s",
+ bch2_d_type_str(d.v->d_type),
+ bch2_d_type_str(inode_d_type(target)),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
+ n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ goto err;
+
+ bkey_reassemble(&n->k_i, d.s_c);
+ n->v.d_type = inode_d_type(target);
+
+ ret = bch2_trans_update(trans, iter, &n->k_i, 0);
+ if (ret)
+ goto err;
+
+ d = dirent_i_to_s_c(n);
+ }
+
+ if (d.v->d_type == DT_SUBVOL &&
+ target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) &&
+ (c->sb.version < bcachefs_metadata_version_subvol_dirent ||
+ fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u",
+ le32_to_cpu(d.v->d_parent_subvol),
+ target->bi_parent_subvol))) {
+ n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ goto err;
+
+ bkey_reassemble(&n->k_i, d.s_c);
+ n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
+
+ ret = bch2_trans_update(trans, iter, &n->k_i, 0);
+ if (ret)
+ goto err;
+
+ d = dirent_i_to_s_c(n);
+ }
+out:
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct bch_hash_info *hash_info,
+ struct inode_walker *dir,
+ struct inode_walker *target,
+ struct snapshots_seen *s)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c_dirent d;
+ struct inode_walker_entry *i;
+ struct printbuf buf = PRINTBUF;
+ struct bpos equiv;
+ int ret = 0;
+
+ ret = check_key_has_snapshot(trans, iter, k);
+ if (ret) {
+ ret = ret < 0 ? ret : 0;
+ goto out;
+ }
+
+ equiv = k.k->p;
+ equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
+
+ ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
+ if (ret)
+ goto err;
+
+ if (k.k->type == KEY_TYPE_whiteout)
+ goto out;
+
+ if (dir->last_pos.inode != k.k->p.inode) {
+ ret = check_subdir_count(trans, dir);
+ if (ret)
+ goto err;
+ }
+
+ BUG_ON(!iter->path->should_be_locked);
+
+ i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
+ ret = PTR_ERR_OR_ZERO(i);
+ if (ret < 0)
+ goto err;
+
+ if (dir->first_this_inode && dir->inodes.nr)
+ *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
+ dir->first_this_inode = false;
+
+ if (fsck_err_on(!i, c,
+ "dirent in nonexisting directory:\n%s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ goto out;
+ }
+
+ if (!i)
+ goto out;
+
+ if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
+ "dirent in non directory inode type %s:\n%s",
+ bch2_d_type_str(inode_d_type(&i->inode)),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter, 0);
+ goto out;
+ }
+
+ ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
+ if (ret < 0)
+ goto err;
+ if (ret) {
+ /* dirent has been deleted */
+ ret = 0;
+ goto out;
+ }
+
+ if (k.k->type != KEY_TYPE_dirent)
+ goto out;
+
+ d = bkey_s_c_to_dirent(k);
+
+ if (d.v->d_type == DT_SUBVOL) {
+ struct bch_inode_unpacked subvol_root;
+ u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
+ u32 target_snapshot;
+ u64 target_inum;
+
+ ret = __subvol_lookup(trans, target_subvol,
+ &target_snapshot, &target_inum);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ goto err;
+
+ if (fsck_err_on(ret, c,
+ "dirent points to missing subvolume %u",
+ le32_to_cpu(d.v->d_child_subvol))) {
+ ret = __remove_dirent(trans, d.k->p);
+ goto err;
+ }
+
+ ret = __lookup_inode(trans, target_inum,
+ &subvol_root, &target_snapshot);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ goto err;
+
+ if (fsck_err_on(ret, c,
+ "subvolume %u points to missing subvolume root %llu",
+ target_subvol,
+ target_inum)) {
+ bch_err(c, "repair not implemented yet");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
+ "subvol root %llu has wrong bi_subvol field: got %u, should be %u",
+ target_inum,
+ subvol_root.bi_subvol, target_subvol)) {
+ subvol_root.bi_subvol = target_subvol;
+ ret = __write_inode(trans, &subvol_root, target_snapshot);
+ if (ret)
+ goto err;
+ }
+
+ ret = check_dirent_target(trans, iter, d, &subvol_root,
+ target_snapshot);
+ if (ret)
+ goto err;
+ } else {
+ ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
+ if (ret)
+ goto err;
+
+ if (fsck_err_on(!target->inodes.nr, c,
+ "dirent points to missing inode: (equiv %u)\n%s",
+ equiv.snapshot,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k),
+ buf.buf))) {
+ ret = __remove_dirent(trans, d.k->p);
+ if (ret)
+ goto err;
+ }
+
+ darray_for_each(target->inodes, i) {
+ ret = check_dirent_target(trans, iter, d,
+ &i->inode, i->snapshot);
+ if (ret)
+ goto err;
+ }
+ }
+
+ if (d.v->d_type == DT_DIR)
+ for_each_visible_inode(c, s, dir, equiv.snapshot, i)
+ i->count++;
+
+out:
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/*
+ * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
+ * validate d_type
+ */
+int bch2_check_dirents(struct bch_fs *c)
+{
+ struct inode_walker dir = inode_walker_init();
+ struct inode_walker target = inode_walker_init();
+ struct snapshots_seen s;
+ struct bch_hash_info hash_info;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ snapshots_seen_init(&s);
+
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
+ POS(BCACHEFS_ROOT_INO, 0),
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ k,
+ NULL, NULL,
+ BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s));
+
+ bch2_trans_put(trans);
+ snapshots_seen_exit(&s);
+ inode_walker_exit(&dir);
+ inode_walker_exit(&target);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct bch_hash_info *hash_info,
+ struct inode_walker *inode)
+{
+ struct bch_fs *c = trans->c;
+ struct inode_walker_entry *i;
+ int ret;
+
+ ret = check_key_has_snapshot(trans, iter, k);
+ if (ret)
+ return ret;
+
+ i = walk_inode(trans, inode, k.k->p, k.k->type == KEY_TYPE_whiteout);
+ ret = PTR_ERR_OR_ZERO(i);
+ if (ret)
+ return ret;
+
+ if (inode->first_this_inode && inode->inodes.nr)
+ *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
+ inode->first_this_inode = false;
+
+ if (fsck_err_on(!i, c,
+ "xattr for missing inode %llu",
+ k.k->p.inode))
+ return bch2_btree_delete_at(trans, iter, 0);
+
+ if (!i)
+ return 0;
+
+ ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
+fsck_err:
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/*
+ * Walk xattrs: verify that they all have a corresponding inode
+ */
+int bch2_check_xattrs(struct bch_fs *c)
+{
+ struct inode_walker inode = inode_walker_init();
+ struct bch_hash_info hash_info;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
+ POS(BCACHEFS_ROOT_INO, 0),
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ k,
+ NULL, NULL,
+ BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_xattr(trans, &iter, k, &hash_info, &inode)));
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int check_root_trans(struct btree_trans *trans)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked root_inode;
+ u32 snapshot;
+ u64 inum;
+ int ret;
+
+ ret = __subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ return ret;
+
+ if (mustfix_fsck_err_on(ret, c, "root subvol missing")) {
+ struct bkey_i_subvolume root_subvol;
+
+ snapshot = U32_MAX;
+ inum = BCACHEFS_ROOT_INO;
+
+ bkey_subvolume_init(&root_subvol.k_i);
+ root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
+ root_subvol.v.flags = 0;
+ root_subvol.v.snapshot = cpu_to_le32(snapshot);
+ root_subvol.v.inode = cpu_to_le64(inum);
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ bch2_btree_insert_trans(trans, BTREE_ID_subvolumes,
+ &root_subvol.k_i, 0));
+ bch_err_msg(c, ret, "writing root subvol");
+ if (ret)
+ goto err;
+ }
+
+ ret = __lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ return ret;
+
+ if (mustfix_fsck_err_on(ret, c, "root directory missing") ||
+ mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c,
+ "root inode not a directory")) {
+ bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
+ 0, NULL);
+ root_inode.bi_inum = inum;
+
+ ret = __write_inode(trans, &root_inode, snapshot);
+ bch_err_msg(c, ret, "writing root inode");
+ }
+err:
+fsck_err:
+ return ret;
+}
+
+/* Get root directory, create if it doesn't exist: */
+int bch2_check_root(struct bch_fs *c)
+{
+ int ret;
+
+ ret = bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ check_root_trans(trans));
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+struct pathbuf_entry {
+ u64 inum;
+ u32 snapshot;
+};
+
+typedef DARRAY(struct pathbuf_entry) pathbuf;
+
+static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
+{
+ struct pathbuf_entry *i;
+
+ darray_for_each(*p, i)
+ if (i->inum == inum &&
+ i->snapshot == snapshot)
+ return true;
+
+ return false;
+}
+
+static int path_down(struct bch_fs *c, pathbuf *p,
+ u64 inum, u32 snapshot)
+{
+ int ret = darray_push(p, ((struct pathbuf_entry) {
+ .inum = inum,
+ .snapshot = snapshot,
+ }));
+
+ if (ret)
+ bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
+ p->size);
+ return ret;
+}
+
+/*
+ * Check that a given inode is reachable from the root:
+ *
+ * XXX: we should also be verifying that inodes are in the right subvolumes
+ */
+static int check_path(struct btree_trans *trans,
+ pathbuf *p,
+ struct bch_inode_unpacked *inode,
+ u32 snapshot)
+{
+ struct bch_fs *c = trans->c;
+ int ret = 0;
+
+ snapshot = bch2_snapshot_equiv(c, snapshot);
+ p->nr = 0;
+
+ while (!(inode->bi_inum == BCACHEFS_ROOT_INO &&
+ inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)) {
+ struct btree_iter dirent_iter;
+ struct bkey_s_c_dirent d;
+ u32 parent_snapshot = snapshot;
+
+ if (inode->bi_subvol) {
+ u64 inum;
+
+ ret = subvol_lookup(trans, inode->bi_parent_subvol,
+ &parent_snapshot, &inum);
+ if (ret)
+ break;
+ }
+
+ ret = lockrestart_do(trans,
+ PTR_ERR_OR_ZERO((d = dirent_get_by_pos(trans, &dirent_iter,
+ SPOS(inode->bi_dir, inode->bi_dir_offset,
+ parent_snapshot))).k));
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ break;
+
+ if (!ret && !dirent_points_to_inode(d, inode)) {
+ bch2_trans_iter_exit(trans, &dirent_iter);
+ ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
+ }
+
+ if (bch2_err_matches(ret, ENOENT)) {
+ if (fsck_err(c, "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
+ inode->bi_inum, snapshot,
+ bch2_d_type_str(inode_d_type(inode)),
+ inode->bi_nlink,
+ inode->bi_dir,
+ inode->bi_dir_offset))
+ ret = reattach_inode(trans, inode, snapshot);
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &dirent_iter);
+
+ if (!S_ISDIR(inode->bi_mode))
+ break;
+
+ ret = path_down(c, p, inode->bi_inum, snapshot);
+ if (ret) {
+ bch_err(c, "memory allocation failure");
+ return ret;
+ }
+
+ snapshot = parent_snapshot;
+
+ ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot);
+ if (ret) {
+ /* Should have been caught in dirents pass */
+ bch_err(c, "error looking up parent directory: %i", ret);
+ break;
+ }
+
+ if (path_is_dup(p, inode->bi_inum, snapshot)) {
+ struct pathbuf_entry *i;
+
+ /* XXX print path */
+ bch_err(c, "directory structure loop");
+
+ darray_for_each(*p, i)
+ pr_err("%llu:%u", i->inum, i->snapshot);
+ pr_err("%llu:%u", inode->bi_inum, snapshot);
+
+ if (!fsck_err(c, "directory structure loop"))
+ return 0;
+
+ ret = commit_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ remove_backpointer(trans, inode));
+ if (ret) {
+ bch_err(c, "error removing dirent: %i", ret);
+ break;
+ }
+
+ ret = reattach_inode(trans, inode, snapshot);
+ }
+ }
+fsck_err:
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/*
+ * Check for unreachable inodes, as well as loops in the directory structure:
+ * after bch2_check_dirents(), an inode whose backpointer doesn't resolve
+ * to a dirent is unreachable:
+ */
+int bch2_check_directory_structure(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_inode_unpacked u;
+ pathbuf path = { 0, };
+ int ret;
+
+ for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ if (!bkey_is_inode(k.k))
+ continue;
+
+ ret = bch2_inode_unpack(k, &u);
+ if (ret) {
+ /* Should have been caught earlier in fsck: */
+ bch_err(c, "error unpacking inode %llu: %i", k.k->p.offset, ret);
+ break;
+ }
+
+ if (u.bi_flags & BCH_INODE_UNLINKED)
+ continue;
+
+ ret = check_path(trans, &path, &u, iter.pos.snapshot);
+ if (ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ darray_exit(&path);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+struct nlink_table {
+ size_t nr;
+ size_t size;
+
+ struct nlink {
+ u64 inum;
+ u32 snapshot;
+ u32 count;
+ } *d;
+};
+
+static int add_nlink(struct bch_fs *c, struct nlink_table *t,
+ u64 inum, u32 snapshot)
+{
+ if (t->nr == t->size) {
+ size_t new_size = max_t(size_t, 128UL, t->size * 2);
+ void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
+
+ if (!d) {
+ bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
+ new_size);
+ return -BCH_ERR_ENOMEM_fsck_add_nlink;
+ }
+
+ if (t->d)
+ memcpy(d, t->d, t->size * sizeof(t->d[0]));
+ kvfree(t->d);
+
+ t->d = d;
+ t->size = new_size;
+ }
+
+ t->d[t->nr++] = (struct nlink) {
+ .inum = inum,
+ .snapshot = snapshot,
+ };
+
+ return 0;
+}
+
+static int nlink_cmp(const void *_l, const void *_r)
+{
+ const struct nlink *l = _l;
+ const struct nlink *r = _r;
+
+ return cmp_int(l->inum, r->inum) ?: cmp_int(l->snapshot, r->snapshot);
+}
+
+static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
+ struct nlink_table *links,
+ u64 range_start, u64 range_end, u64 inum, u32 snapshot)
+{
+ struct nlink *link, key = {
+ .inum = inum, .snapshot = U32_MAX,
+ };
+
+ if (inum < range_start || inum >= range_end)
+ return;
+
+ link = __inline_bsearch(&key, links->d, links->nr,
+ sizeof(links->d[0]), nlink_cmp);
+ if (!link)
+ return;
+
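+	/*
+	 * The binary search may land anywhere within a run of entries for this
+	 * inum - rewind to the first:
+	 */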
+ while (link > links->d && link[0].inum == link[-1].inum)
+ --link;
+
+ for (; link < links->d + links->nr && link->inum == inum; link++)
+ if (ref_visible(c, s, snapshot, link->snapshot)) {
+ link->count++;
+ if (link->snapshot >= snapshot)
+ break;
+ }
+}
+
+noinline_for_stack
+static int check_nlinks_find_hardlinks(struct bch_fs *c,
+ struct nlink_table *t,
+ u64 start, u64 *end)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_inode_unpacked u;
+ int ret = 0;
+
+ for_each_btree_key(trans, iter, BTREE_ID_inodes,
+ POS(0, start),
+ BTREE_ITER_INTENT|
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ if (!bkey_is_inode(k.k))
+ continue;
+
+ /* Should never fail, checked by bch2_inode_invalid: */
+ BUG_ON(bch2_inode_unpack(k, &u));
+
+ /*
+ * Backpointer and directory structure checks are sufficient for
+ * directories, since they can't have hardlinks:
+ */
+ if (S_ISDIR(u.bi_mode))
+ continue;
+
+ if (!u.bi_nlink)
+ continue;
+
+ ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
+ if (ret) {
+ *end = k.k->p.offset;
+ ret = 0;
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
+
+ return ret;
+}
+
+noinline_for_stack
+static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
+ u64 range_start, u64 range_end)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct snapshots_seen s;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_dirent d;
+ int ret;
+
+ snapshots_seen_init(&s);
+
+ for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
+ BTREE_ITER_INTENT|
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
+ if (ret)
+ break;
+
+ switch (k.k->type) {
+ case KEY_TYPE_dirent:
+ d = bkey_s_c_to_dirent(k);
+
+ if (d.v->d_type != DT_DIR &&
+ d.v->d_type != DT_SUBVOL)
+ inc_link(c, &s, links, range_start, range_end,
+ le64_to_cpu(d.v->d_inum),
+ bch2_snapshot_equiv(c, d.k->p.snapshot));
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ bch_err(c, "error in fsck: btree error %i while walking dirents", ret);
+
+ bch2_trans_put(trans);
+ snapshots_seen_exit(&s);
+ return ret;
+}
+
+static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct nlink_table *links,
+ size_t *idx, u64 range_end)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked u;
+ struct nlink *link = &links->d[*idx];
+ int ret = 0;
+
+ if (k.k->p.offset >= range_end)
+ return 1;
+
+ if (!bkey_is_inode(k.k))
+ return 0;
+
+ BUG_ON(bch2_inode_unpack(k, &u));
+
+ if (S_ISDIR(u.bi_mode))
+ return 0;
+
+ if (!u.bi_nlink)
+ return 0;
+
+ while ((cmp_int(link->inum, k.k->p.offset) ?:
+ cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
+ BUG_ON(*idx == links->nr);
+ link = &links->d[++*idx];
+ }
+
+ if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
+ "inode %llu type %s has wrong i_nlink (%u, should be %u)",
+ u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
+ bch2_inode_nlink_get(&u), link->count)) {
+ bch2_inode_nlink_set(&u, link->count);
+ ret = __write_inode(trans, &u, k.k->p.snapshot);
+ }
+fsck_err:
+ return ret;
+}
+
+noinline_for_stack
+static int check_nlinks_update_hardlinks(struct bch_fs *c,
+ struct nlink_table *links,
+ u64 range_start, u64 range_end)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ size_t idx = 0;
+ int ret = 0;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
+ POS(0, range_start),
+ BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
+ if (ret < 0) {
+ bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
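+/*
+ * Link counts are checked in (possibly) multiple passes: the nlink table
+ * covers only as much of the inode keyspace as fits in memory at once, so
+ * repeat until the whole keyspace has been processed:
+ */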
+int bch2_check_nlinks(struct bch_fs *c)
+{
+ struct nlink_table links = { 0 };
+ u64 this_iter_range_start, next_iter_range_start = 0;
+ int ret = 0;
+
+ do {
+ this_iter_range_start = next_iter_range_start;
+ next_iter_range_start = U64_MAX;
+
+ ret = check_nlinks_find_hardlinks(c, &links,
+ this_iter_range_start,
+ &next_iter_range_start);
+
+ ret = check_nlinks_walk_dirents(c, &links,
+ this_iter_range_start,
+ next_iter_range_start);
+ if (ret)
+ break;
+
+ ret = check_nlinks_update_hardlinks(c, &links,
+ this_iter_range_start,
+ next_iter_range_start);
+ if (ret)
+ break;
+
+ links.nr = 0;
+ } while (next_iter_range_start != U64_MAX);
+
+ kvfree(links.d);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
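+/*
+ * reflink_p keys written by metadata versions before reflink_p_fix may carry
+ * nonzero front/back padding; clear it (see the version check in
+ * bch2_fix_reflink_p() below):
+ */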
+static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_reflink_p p;
+ struct bkey_i_reflink_p *u;
+ int ret;
+
+ if (k.k->type != KEY_TYPE_reflink_p)
+ return 0;
+
+ p = bkey_s_c_to_reflink_p(k);
+
+ if (!p.v->front_pad && !p.v->back_pad)
+ return 0;
+
+ u = bch2_trans_kmalloc(trans, sizeof(*u));
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ return ret;
+
+ bkey_reassemble(&u->k_i, k);
+ u->v.front_pad = 0;
+ u->v.back_pad = 0;
+
+ return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
+}
+
+int bch2_fix_reflink_p(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
+ return 0;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_extents, POS_MIN,
+ BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ fix_reflink_p_key(trans, &iter, k)));
+ bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/fsck.h b/fs/bcachefs/fsck.h
new file mode 100644
index 000000000000..90c87b5089a0
--- /dev/null
+++ b/fs/bcachefs/fsck.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_FSCK_H
+#define _BCACHEFS_FSCK_H
+
+int bch2_check_inodes(struct bch_fs *);
+int bch2_check_extents(struct bch_fs *);
+int bch2_check_dirents(struct bch_fs *);
+int bch2_check_xattrs(struct bch_fs *);
+int bch2_check_root(struct bch_fs *);
+int bch2_check_directory_structure(struct bch_fs *);
+int bch2_check_nlinks(struct bch_fs *);
+int bch2_fix_reflink_p(struct bch_fs *);
+
+#endif /* _BCACHEFS_FSCK_H */
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
new file mode 100644
index 000000000000..bb3f443d8381
--- /dev/null
+++ b/fs/bcachefs/inode.c
@@ -0,0 +1,1133 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_key_cache.h"
+#include "btree_write_buffer.h"
+#include "bkey_methods.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "error.h"
+#include "extents.h"
+#include "extent_update.h"
+#include "inode.h"
+#include "str_hash.h"
+#include "snapshot.h"
+#include "subvolume.h"
+#include "varint.h"
+
+#include <linux/random.h>
+
+#include <asm/unaligned.h>
+
+const char * const bch2_inode_opts[] = {
+#define x(name, ...) #name,
+ BCH_INODE_OPTS()
+#undef x
+ NULL,
+};
+
+static const u8 byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };
+
+static int inode_decode_field(const u8 *in, const u8 *end,
+ u64 out[2], unsigned *out_bits)
+{
+ __be64 be[2] = { 0, 0 };
+ unsigned bytes, shift;
+ u8 *p;
+
+ if (in >= end)
+ return -1;
+
+ if (!*in)
+ return -1;
+
+ /*
+ * position of highest set bit indicates number of bytes:
+ * shift = number of bits to remove in high byte:
+ */
+ shift = 8 - __fls(*in); /* 1 <= shift <= 8 */
+ bytes = byte_table[shift - 1];
+
+ if (in + bytes > end)
+ return -1;
+
+ p = (u8 *) be + 16 - bytes;
+ memcpy(p, in, bytes);
+ *p ^= (1 << 8) >> shift;
+
+ out[0] = be64_to_cpu(be[0]);
+ out[1] = be64_to_cpu(be[1]);
+ *out_bits = out[0] ? 64 + fls64(out[0]) : fls64(out[1]);
+
+ return bytes;
+}
+
+static inline void bch2_inode_pack_inlined(struct bkey_inode_buf *packed,
+ const struct bch_inode_unpacked *inode)
+{
+ struct bkey_i_inode_v3 *k = &packed->inode;
+ u8 *out = k->v.fields;
+ u8 *end = (void *) &packed[1];
+ u8 *last_nonzero_field = out;
+ unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
+ unsigned bytes;
+ int ret;
+
+ bkey_inode_v3_init(&packed->inode.k_i);
+ packed->inode.k.p.offset = inode->bi_inum;
+ packed->inode.v.bi_journal_seq = cpu_to_le64(inode->bi_journal_seq);
+ packed->inode.v.bi_hash_seed = inode->bi_hash_seed;
+ packed->inode.v.bi_flags = cpu_to_le64(inode->bi_flags);
+ packed->inode.v.bi_sectors = cpu_to_le64(inode->bi_sectors);
+ packed->inode.v.bi_size = cpu_to_le64(inode->bi_size);
+ packed->inode.v.bi_version = cpu_to_le64(inode->bi_version);
+ SET_INODEv3_MODE(&packed->inode.v, inode->bi_mode);
+ SET_INODEv3_FIELDS_START(&packed->inode.v, INODEv3_FIELDS_START_CUR);
+
+#define x(_name, _bits) \
+ nr_fields++; \
+ \
+ if (inode->_name) { \
+ ret = bch2_varint_encode_fast(out, inode->_name); \
+ out += ret; \
+ \
+ if (_bits > 64) \
+ *out++ = 0; \
+ \
+ last_nonzero_field = out; \
+ last_nonzero_fieldnr = nr_fields; \
+ } else { \
+ *out++ = 0; \
+ \
+ if (_bits > 64) \
+ *out++ = 0; \
+ }
+
+ BCH_INODE_FIELDS_v3()
+#undef x
+ BUG_ON(out > end);
+
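+	/* Trim trailing zero fields - the unpack path zeroes missing fields: */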
+ out = last_nonzero_field;
+ nr_fields = last_nonzero_fieldnr;
+
+ bytes = out - (u8 *) &packed->inode.v;
+ set_bkey_val_bytes(&packed->inode.k, bytes);
+ memset_u64s_tail(&packed->inode.v, 0, bytes);
+
+ SET_INODEv3_NR_FIELDS(&k->v, nr_fields);
+
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+ struct bch_inode_unpacked unpacked;
+
+ ret = bch2_inode_unpack(bkey_i_to_s_c(&packed->inode.k_i), &unpacked);
+ BUG_ON(ret);
+ BUG_ON(unpacked.bi_inum != inode->bi_inum);
+ BUG_ON(unpacked.bi_hash_seed != inode->bi_hash_seed);
+ BUG_ON(unpacked.bi_sectors != inode->bi_sectors);
+ BUG_ON(unpacked.bi_size != inode->bi_size);
+ BUG_ON(unpacked.bi_version != inode->bi_version);
+ BUG_ON(unpacked.bi_mode != inode->bi_mode);
+
+#define x(_name, _bits) if (unpacked._name != inode->_name) \
+ panic("unpacked %llu should be %llu", \
+ (u64) unpacked._name, (u64) inode->_name);
+ BCH_INODE_FIELDS_v3()
+#undef x
+ }
+}
+
+void bch2_inode_pack(struct bkey_inode_buf *packed,
+ const struct bch_inode_unpacked *inode)
+{
+ bch2_inode_pack_inlined(packed, inode);
+}
+
+static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode,
+ struct bch_inode_unpacked *unpacked)
+{
+ const u8 *in = inode.v->fields;
+ const u8 *end = bkey_val_end(inode);
+ u64 field[2];
+ unsigned fieldnr = 0, field_bits;
+ int ret;
+
+#define x(_name, _bits) \
+ if (fieldnr++ == INODE_NR_FIELDS(inode.v)) { \
+ unsigned offset = offsetof(struct bch_inode_unpacked, _name);\
+ memset((void *) unpacked + offset, 0, \
+ sizeof(*unpacked) - offset); \
+ return 0; \
+ } \
+ \
+ ret = inode_decode_field(in, end, field, &field_bits); \
+ if (ret < 0) \
+ return ret; \
+ \
+ if (field_bits > sizeof(unpacked->_name) * 8) \
+ return -1; \
+ \
+ unpacked->_name = field[1]; \
+ in += ret;
+
+ BCH_INODE_FIELDS_v2()
+#undef x
+
+ /* XXX: signal if there were more fields than expected? */
+ return 0;
+}
+
+static int bch2_inode_unpack_v2(struct bch_inode_unpacked *unpacked,
+ const u8 *in, const u8 *end,
+ unsigned nr_fields)
+{
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v[2];
+
+#define x(_name, _bits) \
+ if (fieldnr < nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v[0]); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ \
+ if (_bits > 64) { \
+ ret = bch2_varint_decode_fast(in, end, &v[1]); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v[1] = 0; \
+ } \
+ } else { \
+ v[0] = v[1] = 0; \
+ } \
+ \
+ unpacked->_name = v[0]; \
+ if (v[1] || v[0] != unpacked->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_INODE_FIELDS_v2()
+#undef x
+
+ /* XXX: signal if there were more fields than expected? */
+ return 0;
+}
+
+static int bch2_inode_unpack_v3(struct bkey_s_c k,
+ struct bch_inode_unpacked *unpacked)
+{
+ struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
+ const u8 *in = inode.v->fields;
+ const u8 *end = bkey_val_end(inode);
+ unsigned nr_fields = INODEv3_NR_FIELDS(inode.v);
+ unsigned fieldnr = 0;
+ int ret;
+ u64 v[2];
+
+ unpacked->bi_inum = inode.k->p.offset;
+	unpacked->bi_journal_seq = le64_to_cpu(inode.v->bi_journal_seq);
+ unpacked->bi_hash_seed = inode.v->bi_hash_seed;
+ unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags);
+ unpacked->bi_sectors = le64_to_cpu(inode.v->bi_sectors);
+ unpacked->bi_size = le64_to_cpu(inode.v->bi_size);
+ unpacked->bi_version = le64_to_cpu(inode.v->bi_version);
+ unpacked->bi_mode = INODEv3_MODE(inode.v);
+
+#define x(_name, _bits) \
+ if (fieldnr < nr_fields) { \
+ ret = bch2_varint_decode_fast(in, end, &v[0]); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ \
+ if (_bits > 64) { \
+ ret = bch2_varint_decode_fast(in, end, &v[1]); \
+ if (ret < 0) \
+ return ret; \
+ in += ret; \
+ } else { \
+ v[1] = 0; \
+ } \
+ } else { \
+ v[0] = v[1] = 0; \
+ } \
+ \
+ unpacked->_name = v[0]; \
+ if (v[1] || v[0] != unpacked->_name) \
+ return -1; \
+ fieldnr++;
+
+ BCH_INODE_FIELDS_v3()
+#undef x
+
+ /* XXX: signal if there were more fields than expected? */
+ return 0;
+}
+
+static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
+ struct bch_inode_unpacked *unpacked)
+{
+ memset(unpacked, 0, sizeof(*unpacked));
+
+ switch (k.k->type) {
+ case KEY_TYPE_inode: {
+ struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
+
+ unpacked->bi_inum = inode.k->p.offset;
+		unpacked->bi_journal_seq = 0;
+ unpacked->bi_hash_seed = inode.v->bi_hash_seed;
+ unpacked->bi_flags = le32_to_cpu(inode.v->bi_flags);
+ unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
+
+ if (INODE_NEW_VARINT(inode.v)) {
+ return bch2_inode_unpack_v2(unpacked, inode.v->fields,
+ bkey_val_end(inode),
+ INODE_NR_FIELDS(inode.v));
+ } else {
+ return bch2_inode_unpack_v1(inode, unpacked);
+ }
+ }
+ case KEY_TYPE_inode_v2: {
+ struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
+
+ unpacked->bi_inum = inode.k->p.offset;
+		unpacked->bi_journal_seq = le64_to_cpu(inode.v->bi_journal_seq);
+ unpacked->bi_hash_seed = inode.v->bi_hash_seed;
+ unpacked->bi_flags = le64_to_cpu(inode.v->bi_flags);
+ unpacked->bi_mode = le16_to_cpu(inode.v->bi_mode);
+
+ return bch2_inode_unpack_v2(unpacked, inode.v->fields,
+ bkey_val_end(inode),
+ INODEv2_NR_FIELDS(inode.v));
+ }
+ default:
+ BUG();
+ }
+}
+
+int bch2_inode_unpack(struct bkey_s_c k,
+ struct bch_inode_unpacked *unpacked)
+{
+ if (likely(k.k->type == KEY_TYPE_inode_v3))
+ return bch2_inode_unpack_v3(k, unpacked);
+ return bch2_inode_unpack_slowpath(k, unpacked);
+}
+
+static int bch2_inode_peek_nowarn(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_inode_unpacked *inode,
+ subvol_inum inum, unsigned flags)
+{
+ struct bkey_s_c k;
+ u32 snapshot;
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ return ret;
+
+ k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes,
+ SPOS(0, inum.inum, snapshot),
+ flags|BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_unpack(k, inode);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ bch2_trans_iter_exit(trans, iter);
+ return ret;
+}
+
+int bch2_inode_peek(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_inode_unpacked *inode,
+ subvol_inum inum, unsigned flags)
+{
+ int ret = bch2_inode_peek_nowarn(trans, iter, inode, inum, flags);
+ bch_err_msg(trans->c, ret, "looking up inum %u:%llu:", inum.subvol, inum.inum);
+ return ret;
+}
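+
+/*
+ * Typical usage, mirroring bch2_inode_find_by_inum_trans() below: peek
+ * the inode within a transaction and exit the iterator when done -
+ * on error the iterator has already been exited:
+ *
+ *	struct btree_iter iter;
+ *	struct bch_inode_unpacked u;
+ *	int ret = bch2_inode_peek(trans, &iter, &u, inum, 0);
+ *	if (!ret)
+ *		bch2_trans_iter_exit(trans, &iter);
+ */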
+
+int bch2_inode_write(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_inode_unpacked *inode)
+{
+ struct bkey_inode_buf *inode_p;
+
+ inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
+ if (IS_ERR(inode_p))
+ return PTR_ERR(inode_p);
+
+ bch2_inode_pack_inlined(inode_p, inode);
+ inode_p->inode.k.p.snapshot = iter->snapshot;
+ return bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
+}
+
+struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
+{
+ struct bch_inode_unpacked u;
+ struct bkey_inode_buf *inode_p;
+ int ret;
+
+ if (!bkey_is_inode(&k->k))
+ return ERR_PTR(-ENOENT);
+
+ inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
+ if (IS_ERR(inode_p))
+ return ERR_CAST(inode_p);
+
+ ret = bch2_inode_unpack(bkey_i_to_s_c(k), &u);
+ if (ret)
+ return ERR_PTR(ret);
+
+ bch2_inode_pack(inode_p, &u);
+ return &inode_p->inode.k_i;
+}
+
+static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err)
+{
+ struct bch_inode_unpacked unpacked;
+
+ if (k.k->p.inode) {
+ prt_printf(err, "nonzero k.p.inode");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (k.k->p.offset < BLOCKDEV_INODE_MAX) {
+ prt_printf(err, "fs inode in blockdev range");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (bch2_inode_unpack(k, &unpacked)) {
+ prt_printf(err, "invalid variable length fields");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1) {
+ prt_printf(err, "invalid data checksum type (%u >= %u",
+ unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (unpacked.bi_compression >= BCH_COMPRESSION_OPT_NR + 1) {
+ prt_printf(err, "invalid data checksum type (%u >= %u)",
+ unpacked.bi_compression, BCH_COMPRESSION_OPT_NR + 1);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if ((unpacked.bi_flags & BCH_INODE_UNLINKED) &&
+ unpacked.bi_nlink != 0) {
+ prt_printf(err, "flagged as unlinked but bi_nlink != 0");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode)) {
+ prt_printf(err, "subvolume root but not a directory");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+int bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
+
+ if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
+ prt_printf(err, "invalid str hash type (%llu >= %u)",
+ INODE_STR_HASH(inode.v), BCH_STR_HASH_NR);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return __bch2_inode_invalid(k, err);
+}
+
+int bch2_inode_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
+
+ if (INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
+ prt_printf(err, "invalid str hash type (%llu >= %u)",
+ INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return __bch2_inode_invalid(k, err);
+}
+
+int bch2_inode_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
+
+ if (INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
+ INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k)) {
+ prt_printf(err, "invalid fields_start (got %llu, min %u max %zu)",
+ INODEv3_FIELDS_START(inode.v),
+ INODEv3_FIELDS_START_INITIAL,
+ bkey_val_u64s(inode.k));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
+ prt_printf(err, "invalid str hash type (%llu >= %u)",
+ INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return __bch2_inode_invalid(k, err);
+}
+
+static void __bch2_inode_unpacked_to_text(struct printbuf *out,
+ struct bch_inode_unpacked *inode)
+{
+ prt_printf(out, "mode %o flags %x journal_seq %llu bi_size %llu bi_sectors %llu bi_version %llu",
+ inode->bi_mode, inode->bi_flags,
+ inode->bi_journal_seq,
+ inode->bi_size,
+ inode->bi_sectors,
+ inode->bi_version);
+
+#define x(_name, _bits) \
+ prt_printf(out, " "#_name " %llu", (u64) inode->_name);
+ BCH_INODE_FIELDS_v3()
+#undef x
+}
+
+void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
+{
+ prt_printf(out, "inum: %llu ", inode->bi_inum);
+ __bch2_inode_unpacked_to_text(out, inode);
+}
+
+void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bch_inode_unpacked inode;
+
+ if (bch2_inode_unpack(k, &inode)) {
+ prt_printf(out, "(unpack error)");
+ return;
+ }
+
+ __bch2_inode_unpacked_to_text(out, &inode);
+}
+
+static inline u64 bkey_inode_flags(struct bkey_s_c k)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_inode:
+ return le32_to_cpu(bkey_s_c_to_inode(k).v->bi_flags);
+ case KEY_TYPE_inode_v2:
+ return le64_to_cpu(bkey_s_c_to_inode_v2(k).v->bi_flags);
+ case KEY_TYPE_inode_v3:
+ return le64_to_cpu(bkey_s_c_to_inode_v3(k).v->bi_flags);
+ default:
+ return 0;
+ }
+}
+
+static inline bool bkey_is_deleted_inode(struct bkey_s_c k)
+{
+ return bkey_inode_flags(k) & BCH_INODE_UNLINKED;
+}
+
+int bch2_trans_mark_inode(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ struct bkey_i *new,
+ unsigned flags)
+{
+ int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
+ bool old_deleted = bkey_is_deleted_inode(old);
+ bool new_deleted = bkey_is_deleted_inode(bkey_i_to_s_c(new));
+
+ if (nr) {
+ int ret = bch2_replicas_deltas_realloc(trans, 0);
+ struct replicas_delta_list *d = trans->fs_usage_deltas;
+
+ if (ret)
+ return ret;
+
+ d->nr_inodes += nr;
+ }
+
+ if (old_deleted != new_deleted) {
+ int ret = bch2_btree_bit_mod(trans, BTREE_ID_deleted_inodes, new->k.p, new_deleted);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
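+
+/*
+ * Note: besides inode accounting, the transactional trigger above keeps
+ * the deleted_inodes btree in sync - a bit is set at the inode's
+ * position when BCH_INODE_UNLINKED becomes set and cleared when it's
+ * cleared, which is what bch2_delete_dead_inodes() scans later.
+ */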
+
+int bch2_mark_inode(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_fs_usage *fs_usage;
+ u64 journal_seq = trans->journal_res.seq;
+
+ if (flags & BTREE_TRIGGER_INSERT) {
+ struct bch_inode_v3 *v = (struct bch_inode_v3 *) new.v;
+
+ BUG_ON(!journal_seq);
+ BUG_ON(new.k->type != KEY_TYPE_inode_v3);
+
+ v->bi_journal_seq = cpu_to_le64(journal_seq);
+ }
+
+ if (flags & BTREE_TRIGGER_GC) {
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
+
+ fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
+ fs_usage->nr_inodes += bkey_is_inode(new.k);
+ fs_usage->nr_inodes -= bkey_is_inode(old.k);
+
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
+ }
+ return 0;
+}
+
+int bch2_inode_generation_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (k.k->p.inode) {
+ prt_printf(err, "nonzero k.p.inode");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_inode_generation gen = bkey_s_c_to_inode_generation(k);
+
+ prt_printf(out, "generation: %u", le32_to_cpu(gen.v->bi_generation));
+}
+
+void bch2_inode_init_early(struct bch_fs *c,
+ struct bch_inode_unpacked *inode_u)
+{
+ enum bch_str_hash_type str_hash =
+ bch2_str_hash_opt_to_type(c, c->opts.str_hash);
+
+ memset(inode_u, 0, sizeof(*inode_u));
+
+ /* ick: the str hash type is stashed in bi_flags */
+ inode_u->bi_flags |= str_hash << INODE_STR_HASH_OFFSET;
+ get_random_bytes(&inode_u->bi_hash_seed,
+ sizeof(inode_u->bi_hash_seed));
+}
+
+void bch2_inode_init_late(struct bch_inode_unpacked *inode_u, u64 now,
+ uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
+ struct bch_inode_unpacked *parent)
+{
+ inode_u->bi_mode = mode;
+ inode_u->bi_uid = uid;
+ inode_u->bi_gid = gid;
+ inode_u->bi_dev = rdev;
+ inode_u->bi_atime = now;
+ inode_u->bi_mtime = now;
+ inode_u->bi_ctime = now;
+ inode_u->bi_otime = now;
+
+ if (parent && parent->bi_mode & S_ISGID) {
+ inode_u->bi_gid = parent->bi_gid;
+ if (S_ISDIR(mode))
+ inode_u->bi_mode |= S_ISGID;
+ }
+
+ if (parent) {
+#define x(_name, ...) inode_u->bi_##_name = parent->bi_##_name;
+ BCH_INODE_OPTS()
+#undef x
+ }
+}
+
+void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
+ uid_t uid, gid_t gid, umode_t mode, dev_t rdev,
+ struct bch_inode_unpacked *parent)
+{
+ bch2_inode_init_early(c, inode_u);
+ bch2_inode_init_late(inode_u, bch2_current_time(c),
+ uid, gid, mode, rdev, parent);
+}
+
+static inline u32 bkey_generation(struct bkey_s_c k)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_inode:
+ case KEY_TYPE_inode_v2:
+ BUG();
+ case KEY_TYPE_inode_generation:
+ return le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * This just finds an empty slot:
+ */
+int bch2_inode_create(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_inode_unpacked *inode_u,
+ u32 snapshot, u64 cpu)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ u64 min, max, start, pos, *hint;
+ int ret = 0;
+ unsigned bits = (c->opts.inodes_32bit ? 31 : 63);
+
+ if (c->opts.shard_inode_numbers) {
+ bits -= c->inode_shard_bits;
+
+ min = (cpu << bits);
+ max = (cpu << bits) | ~(ULLONG_MAX << bits);
+
+ min = max_t(u64, min, BLOCKDEV_INODE_MAX);
+ hint = c->unused_inode_hints + cpu;
+ } else {
+ min = BLOCKDEV_INODE_MAX;
+ max = ~(ULLONG_MAX << bits);
+ hint = c->unused_inode_hints;
+ }
+
+ start = READ_ONCE(*hint);
+
+ if (start >= max || start < min)
+ start = min;
+
+ pos = start;
+ bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_INTENT);
+again:
+ while ((k = bch2_btree_iter_peek(iter)).k &&
+ !(ret = bkey_err(k)) &&
+ bkey_lt(k.k->p, POS(0, max))) {
+ if (pos < iter->pos.offset)
+ goto found_slot;
+
+ /*
+ * We don't need to iterate over keys in every snapshot once
+ * we've found just one:
+ */
+ pos = iter->pos.offset + 1;
+ bch2_btree_iter_set_pos(iter, POS(0, pos));
+ }
+
+ if (!ret && pos < max)
+ goto found_slot;
+
+ if (!ret && start == min)
+ ret = -BCH_ERR_ENOSPC_inode_create;
+
+ if (ret) {
+ bch2_trans_iter_exit(trans, iter);
+ return ret;
+ }
+
+ /* Retry from start */
+ pos = start = min;
+ bch2_btree_iter_set_pos(iter, POS(0, pos));
+ goto again;
+found_slot:
+ bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret) {
+ bch2_trans_iter_exit(trans, iter);
+ return ret;
+ }
+
+ *hint = k.k->p.offset;
+ inode_u->bi_inum = k.k->p.offset;
+ inode_u->bi_generation = bkey_generation(k);
+ return 0;
+}
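+
+/*
+ * Allocation strategy above, in brief: scan forwards from a cached hint
+ * for the first unused inode number below the max, wrapping around to
+ * the minimum once before giving up with -BCH_ERR_ENOSPC_inode_create.
+ * With shard_inode_numbers, the high bits of the inode number encode
+ * the allocating cpu, so each cpu allocates from a disjoint range with
+ * its own hint.
+ */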
+
+static int bch2_inode_delete_keys(struct btree_trans *trans,
+ subvol_inum inum, enum btree_id id)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_i delete;
+ struct bpos end = POS(inum.inum, U64_MAX);
+ u32 snapshot;
+ int ret = 0;
+
+ /*
+ * We're never going to be deleting partial extents, no need to use an
+ * extent iterator:
+ */
+ bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
+ BTREE_ITER_INTENT);
+
+ while (1) {
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+
+ k = bch2_btree_iter_peek_upto(&iter, end);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k)
+ break;
+
+ bkey_init(&delete.k);
+ delete.k.p = iter.pos;
+
+ if (iter.flags & BTREE_ITER_IS_EXTENTS)
+ bch2_key_resize(&delete.k,
+ bpos_min(end, k.k->p).offset -
+ iter.pos.offset);
+
+ ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+err:
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bkey_i_inode_generation delete;
+ struct bch_inode_unpacked inode_u;
+ struct bkey_s_c k;
+ u32 snapshot;
+ int ret;
+
+ /*
+ * If this was a directory, there shouldn't be any real dirents left -
+ * but there could be whiteouts (from hash collisions) that we should
+ * delete:
+ *
+ * XXX: the dirent code could ideally delete whiteouts when they're no
+ * longer needed
+ */
+ ret = bch2_inode_delete_keys(trans, inum, BTREE_ID_extents) ?:
+ bch2_inode_delete_keys(trans, inum, BTREE_ID_xattrs) ?:
+ bch2_inode_delete_keys(trans, inum, BTREE_ID_dirents);
+ if (ret)
+ goto err;
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, inum.inum, snapshot),
+ BTREE_ITER_INTENT|BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!bkey_is_inode(k.k)) {
+ bch2_fs_inconsistent(c,
+ "inode %llu:%u not found when deleting",
+ inum.inum, snapshot);
+ ret = -EIO;
+ goto err;
+ }
+
+ bch2_inode_unpack(k, &inode_u);
+
+ bkey_inode_generation_init(&delete.k_i);
+ delete.k.p = iter.pos;
+ delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
+
+ ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *trans,
+ subvol_inum inum,
+ struct bch_inode_unpacked *inode)
+{
+ struct btree_iter iter;
+ int ret;
+
+ ret = bch2_inode_peek_nowarn(trans, &iter, inode, inum, 0);
+ if (!ret)
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_inode_find_by_inum_trans(struct btree_trans *trans,
+ subvol_inum inum,
+ struct bch_inode_unpacked *inode)
+{
+ struct btree_iter iter;
+ int ret;
+
+ ret = bch2_inode_peek(trans, &iter, inode, inum, 0);
+ if (!ret)
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum,
+ struct bch_inode_unpacked *inode)
+{
+ return bch2_trans_do(c, NULL, NULL, 0,
+ bch2_inode_find_by_inum_trans(trans, inum, inode));
+}
+
+int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
+{
+ if (bi->bi_flags & BCH_INODE_UNLINKED)
+ bi->bi_flags &= ~BCH_INODE_UNLINKED;
+ else {
+ if (bi->bi_nlink == U32_MAX)
+ return -EINVAL;
+
+ bi->bi_nlink++;
+ }
+
+ return 0;
+}
+
+void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *bi)
+{
+ if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_UNLINKED)) {
+ bch2_trans_inconsistent(trans, "inode %llu unlinked but link count nonzero",
+ bi->bi_inum);
+ return;
+ }
+
+ if (bi->bi_flags & BCH_INODE_UNLINKED) {
+ bch2_trans_inconsistent(trans, "inode %llu link count underflow", bi->bi_inum);
+ return;
+ }
+
+ if (bi->bi_nlink)
+ bi->bi_nlink--;
+ else
+ bi->bi_flags |= BCH_INODE_UNLINKED;
+}
+
+struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
+{
+ struct bch_opts ret = { 0 };
+#define x(_name, _bits) \
+ if (inode->bi_##_name) \
+ opt_set(ret, _name, inode->bi_##_name - 1);
+ BCH_INODE_OPTS()
+#undef x
+ return ret;
+}
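+
+/*
+ * Note on the "- 1" above: per-inode option fields are stored biased by
+ * one, so that zero means "unset, inherit the filesystem default" - see
+ * inode_opt_get() in inode.h.
+ */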
+
+void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
+ struct bch_inode_unpacked *inode)
+{
+#define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name);
+ BCH_INODE_OPTS()
+#undef x
+
+ if (opts->nocow)
+ opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
+}
+
+int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter = { NULL };
+ struct bkey_i_inode_generation delete;
+ struct bch_inode_unpacked inode_u;
+ struct bkey_s_c k;
+ int ret;
+
+ do {
+ ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+ SPOS(inum, 0, snapshot),
+ SPOS(inum, U64_MAX, snapshot),
+ 0, NULL) ?:
+ bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
+ SPOS(inum, 0, snapshot),
+ SPOS(inum, U64_MAX, snapshot),
+ 0, NULL) ?:
+ bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
+ SPOS(inum, 0, snapshot),
+ SPOS(inum, U64_MAX, snapshot),
+ 0, NULL);
+ } while (ret == -BCH_ERR_transaction_restart_nested);
+ if (ret)
+ goto err;
+retry:
+ bch2_trans_begin(trans);
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!bkey_is_inode(k.k)) {
+ bch2_fs_inconsistent(c,
+ "inode %llu:%u not found when deleting",
+ inum, snapshot);
+ ret = -EIO;
+ goto err;
+ }
+
+ bch2_inode_unpack(k, &inode_u);
+
+ /* Subvolume root? */
+ if (inode_u.bi_subvol)
+ bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);
+
+ bkey_inode_generation_init(&delete.k_i);
+ delete.k.p = iter.pos;
+ delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
+
+ ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ return ret ?: -BCH_ERR_transaction_restart_nested;
+}
+
+static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ if (bch2_snapshot_is_internal_node(c, pos.snapshot))
+ return 0;
+
+ if (!fsck_err_on(c->sb.clean, c,
+ "filesystem marked as clean but have deleted inode %llu:%u",
+ pos.offset, pos.snapshot))
+ return 0;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
+ if (fsck_err_on(!bkey_is_inode(k.k), c,
+ "nonexistent inode %llu:%u in deleted_inodes btree",
+ pos.offset, pos.snapshot))
+ goto delete;
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_unpack(k, &inode);
+ if (ret)
+ goto err;
+
+ if (fsck_err_on(S_ISDIR(inode.bi_mode), c,
+ "directory %llu:%u in deleted_inodes btree",
+ pos.offset, pos.snapshot))
+ goto delete;
+
+ if (fsck_err_on(!(inode.bi_flags & BCH_INODE_UNLINKED), c,
+ "non-deleted inode %llu:%u in deleted_inodes btree",
+ pos.offset, pos.snapshot))
+ goto delete;
+
+ return 1;
+err:
+fsck_err:
+ return ret;
+delete:
+ return bch2_btree_bit_mod(trans, BTREE_ID_deleted_inodes, pos, false);
+}
+
+int bch2_delete_dead_inodes(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_btree_write_buffer_flush_sync(trans);
+ if (ret)
+ goto err;
+
+ /*
+ * Weird transaction restart handling here because on successful delete,
+ * bch2_inode_rm_snapshot() will return a nested transaction restart,
+ * but we can't retry because the btree write buffer won't have been
+ * flushed and we'd spin:
+ */
+ for_each_btree_key(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ ret = lockrestart_do(trans, may_delete_deleted_inode(trans, k.k->p));
+ if (ret < 0)
+ break;
+
+ if (ret) {
+ if (!test_bit(BCH_FS_RW, &c->flags)) {
+ bch2_trans_unlock(trans);
+ bch2_fs_lazy_rw(c);
+ }
+
+ ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ bch2_trans_put(trans);
+
+ return ret;
+}
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
new file mode 100644
index 000000000000..a7464e1b6960
--- /dev/null
+++ b/fs/bcachefs/inode.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_INODE_H
+#define _BCACHEFS_INODE_H
+
+#include "bkey.h"
+#include "opts.h"
+
+enum bkey_invalid_flags;
+extern const char * const bch2_inode_opts[];
+
+int bch2_inode_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+int bch2_trans_mark_inode(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_i *, unsigned);
+int bch2_mark_inode(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+
+#define bch2_bkey_ops_inode ((struct bkey_ops) { \
+ .key_invalid = bch2_inode_invalid, \
+ .val_to_text = bch2_inode_to_text, \
+ .trans_trigger = bch2_trans_mark_inode, \
+ .atomic_trigger = bch2_mark_inode, \
+ .min_val_size = 16, \
+})
+
+#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) { \
+ .key_invalid = bch2_inode_v2_invalid, \
+ .val_to_text = bch2_inode_to_text, \
+ .trans_trigger = bch2_trans_mark_inode, \
+ .atomic_trigger = bch2_mark_inode, \
+ .min_val_size = 32, \
+})
+
+#define bch2_bkey_ops_inode_v3 ((struct bkey_ops) { \
+ .key_invalid = bch2_inode_v3_invalid, \
+ .val_to_text = bch2_inode_to_text, \
+ .trans_trigger = bch2_trans_mark_inode, \
+ .atomic_trigger = bch2_mark_inode, \
+ .min_val_size = 48, \
+})
+
+static inline bool bkey_is_inode(const struct bkey *k)
+{
+ return k->type == KEY_TYPE_inode ||
+ k->type == KEY_TYPE_inode_v2 ||
+ k->type == KEY_TYPE_inode_v3;
+}
+
+int bch2_inode_generation_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \
+ .key_invalid = bch2_inode_generation_invalid, \
+ .val_to_text = bch2_inode_generation_to_text, \
+ .min_val_size = 8, \
+})
+
+#if 0
+typedef struct {
+ u64 lo;
+ u32 hi;
+} __packed __aligned(4) u96;
+#endif
+typedef u64 u96;
+
+struct bch_inode_unpacked {
+ u64 bi_inum;
+ u64 bi_journal_seq;
+ __le64 bi_hash_seed;
+ u64 bi_size;
+ u64 bi_sectors;
+ u64 bi_version;
+ u32 bi_flags;
+ u16 bi_mode;
+
+#define x(_name, _bits) u##_bits _name;
+ BCH_INODE_FIELDS_v3()
+#undef x
+};
+
+struct bkey_inode_buf {
+ struct bkey_i_inode_v3 inode;
+
+#define x(_name, _bits) + 8 + _bits / 8
+ u8 _pad[0 + BCH_INODE_FIELDS_v3()];
+#undef x
+} __packed __aligned(8);
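+
+/*
+ * The _pad sizing above reserves what should be worst-case space for
+ * the varint encoding of every v3 field - roughly one byte of length
+ * overhead plus the field's full width per field - so packing can never
+ * overrun the buffer.
+ */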
+
+void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
+int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
+struct bkey_i *bch2_inode_to_v3(struct btree_trans *, struct bkey_i *);
+
+void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *);
+
+int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
+ struct bch_inode_unpacked *, subvol_inum, unsigned);
+int bch2_inode_write(struct btree_trans *, struct btree_iter *,
+ struct bch_inode_unpacked *);
+
+void bch2_inode_init_early(struct bch_fs *,
+ struct bch_inode_unpacked *);
+void bch2_inode_init_late(struct bch_inode_unpacked *, u64,
+ uid_t, gid_t, umode_t, dev_t,
+ struct bch_inode_unpacked *);
+void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
+ uid_t, gid_t, umode_t, dev_t,
+ struct bch_inode_unpacked *);
+
+int bch2_inode_create(struct btree_trans *, struct btree_iter *,
+ struct bch_inode_unpacked *, u32, u64);
+
+int bch2_inode_rm(struct bch_fs *, subvol_inum);
+
+int bch2_inode_find_by_inum_nowarn_trans(struct btree_trans *,
+ subvol_inum,
+ struct bch_inode_unpacked *);
+int bch2_inode_find_by_inum_trans(struct btree_trans *, subvol_inum,
+ struct bch_inode_unpacked *);
+int bch2_inode_find_by_inum(struct bch_fs *, subvol_inum,
+ struct bch_inode_unpacked *);
+
+#define inode_opt_get(_c, _inode, _name) \
+ ((_inode)->bi_##_name ? (_inode)->bi_##_name - 1 : (_c)->opts._name)
+
+static inline void bch2_inode_opt_set(struct bch_inode_unpacked *inode,
+ enum inode_opt_id id, u64 v)
+{
+ switch (id) {
+#define x(_name, ...) \
+ case Inode_opt_##_name: \
+ inode->bi_##_name = v; \
+ break;
+ BCH_INODE_OPTS()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+static inline u64 bch2_inode_opt_get(struct bch_inode_unpacked *inode,
+ enum inode_opt_id id)
+{
+ switch (id) {
+#define x(_name, ...) \
+ case Inode_opt_##_name: \
+ return inode->bi_##_name;
+ BCH_INODE_OPTS()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+static inline u8 mode_to_type(umode_t mode)
+{
+ return (mode >> 12) & 15;
+}
+
+static inline u8 inode_d_type(struct bch_inode_unpacked *inode)
+{
+ return inode->bi_subvol ? DT_SUBVOL : mode_to_type(inode->bi_mode);
+}
+
+/* i_nlink: */
+
+static inline unsigned nlink_bias(umode_t mode)
+{
+ return S_ISDIR(mode) ? 2 : 1;
+}
+
+static inline unsigned bch2_inode_nlink_get(struct bch_inode_unpacked *bi)
+{
+ return bi->bi_flags & BCH_INODE_UNLINKED
+ ? 0
+ : bi->bi_nlink + nlink_bias(bi->bi_mode);
+}
+
+static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
+ unsigned nlink)
+{
+ if (nlink) {
+ bi->bi_nlink = nlink - nlink_bias(bi->bi_mode);
+ bi->bi_flags &= ~BCH_INODE_UNLINKED;
+ } else {
+ bi->bi_nlink = 0;
+ bi->bi_flags |= BCH_INODE_UNLINKED;
+ }
+}
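+
+/*
+ * i_nlink encoding, as implied by nlink_bias(): the on-disk bi_nlink
+ * omits the links every inode has implicitly (two for a directory -
+ * "." and the entry in its parent - one otherwise), so a fresh inode
+ * stores bi_nlink == 0, and BCH_INODE_UNLINKED represents a link count
+ * of zero.
+ */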
+
+int bch2_inode_nlink_inc(struct bch_inode_unpacked *);
+void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *);
+
+struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
+void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
+ struct bch_inode_unpacked *);
+
+int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32);
+int bch2_delete_dead_inodes(struct bch_fs *);
+
+#endif /* _BCACHEFS_INODE_H */
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
new file mode 100644
index 000000000000..119834cb8f9e
--- /dev/null
+++ b/fs/bcachefs/io_misc.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * io_misc.c - fallocate, fpunch, truncate
+ */
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "clock.h"
+#include "error.h"
+#include "extents.h"
+#include "extent_update.h"
+#include "inode.h"
+#include "io_misc.h"
+#include "io_write.h"
+#include "logged_ops.h"
+#include "subvolume.h"
+
+/* Overwrites whatever was present with zeroes: */
+int bch2_extent_fallocate(struct btree_trans *trans,
+ subvol_inum inum,
+ struct btree_iter *iter,
+ unsigned sectors,
+ struct bch_io_opts opts,
+ s64 *i_sectors_delta,
+ struct write_point_specifier write_point)
+{
+ struct bch_fs *c = trans->c;
+ struct disk_reservation disk_res = { 0 };
+ struct closure cl;
+ struct open_buckets open_buckets = { 0 };
+ struct bkey_s_c k;
+ struct bkey_buf old, new;
+ unsigned sectors_allocated = 0;
+ bool have_reservation = false;
+ bool unwritten = opts.nocow &&
+ c->sb.version >= bcachefs_metadata_version_unwritten_extents;
+ int ret;
+
+ bch2_bkey_buf_init(&old);
+ bch2_bkey_buf_init(&new);
+ closure_init_stack(&cl);
+
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
+
+ if (!have_reservation) {
+ unsigned new_replicas =
+ max(0, (int) opts.data_replicas -
+ (int) bch2_bkey_nr_ptrs_fully_allocated(k));
+ /*
+ * Get a disk reservation before (in the nocow case) calling
+ * into the allocator:
+ */
+ ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
+ if (unlikely(ret))
+ goto err;
+
+ bch2_bkey_buf_reassemble(&old, c, k);
+ }
+
+ if (have_reservation) {
+ if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
+ goto err;
+
+ bch2_key_resize(&new.k->k, sectors);
+ } else if (!unwritten) {
+ struct bkey_i_reservation *reservation;
+
+ bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
+ reservation = bkey_reservation_init(new.k);
+ reservation->k.p = iter->pos;
+ bch2_key_resize(&reservation->k, sectors);
+ reservation->v.nr_replicas = opts.data_replicas;
+ } else {
+ struct bkey_i_extent *e;
+ struct bch_devs_list devs_have;
+ struct write_point *wp;
+ struct bch_extent_ptr *ptr;
+
+ devs_have.nr = 0;
+
+ bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX);
+
+ e = bkey_extent_init(new.k);
+ e->k.p = iter->pos;
+
+ ret = bch2_alloc_sectors_start_trans(trans,
+ opts.foreground_target,
+ false,
+ write_point,
+ &devs_have,
+ opts.data_replicas,
+ opts.data_replicas,
+ BCH_WATERMARK_normal, 0, &cl, &wp);
+ if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
+ ret = -BCH_ERR_transaction_restart_nested;
+ if (ret)
+ goto err;
+
+ sectors = min(sectors, wp->sectors_free);
+ sectors_allocated = sectors;
+
+ bch2_key_resize(&e->k, sectors);
+
+ bch2_open_bucket_get(c, wp, &open_buckets);
+ bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
+ bch2_alloc_sectors_done(c, wp);
+
+ extent_for_each_ptr(extent_i_to_s(e), ptr)
+ ptr->unwritten = true;
+ }
+
+ have_reservation = true;
+
+ ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
+ 0, i_sectors_delta, true);
+err:
+ if (!ret && sectors_allocated)
+ bch2_increment_clock(c, sectors_allocated, WRITE);
+
+ bch2_open_buckets_put(c, &open_buckets);
+ bch2_disk_reservation_put(c, &disk_res);
+ bch2_bkey_buf_exit(&new, c);
+ bch2_bkey_buf_exit(&old, c);
+
+ if (closure_nr_remaining(&cl) != 1) {
+ bch2_trans_unlock(trans);
+ closure_sync(&cl);
+ }
+
+ return ret;
+}
+
+/*
+ * Returns -BCH_ERR_transaction_restart if we had to drop locks:
+ */
+int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
+ subvol_inum inum, u64 end,
+ s64 *i_sectors_delta)
+{
+ struct bch_fs *c = trans->c;
+ unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
+ struct bpos end_pos = POS(inum.inum, end);
+ struct bkey_s_c k;
+ int ret = 0, ret2 = 0;
+ u32 snapshot;
+
+ while (!ret ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(c, 0);
+ struct bkey_i delete;
+
+ if (ret)
+ ret2 = ret;
+
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ continue;
+
+ bch2_btree_iter_set_snapshot(iter, snapshot);
+
+ /*
+ * peek_upto() doesn't have ideal semantics for extents:
+ */
+ k = bch2_btree_iter_peek_upto(iter, end_pos);
+ if (!k.k)
+ break;
+
+ ret = bkey_err(k);
+ if (ret)
+ continue;
+
+ bkey_init(&delete.k);
+ delete.k.p = iter->pos;
+
+ /* create the biggest key we can */
+ bch2_key_resize(&delete.k, max_sectors);
+ bch2_cut_back(end_pos, &delete);
+
+ ret = bch2_extent_update(trans, inum, iter, &delete,
+ &disk_res, 0, i_sectors_delta, false);
+ bch2_disk_reservation_put(c, &disk_res);
+ }
+
+ return ret ?: ret2;
+}
+
+int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
+ s64 *i_sectors_delta)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ POS(inum.inum, start),
+ BTREE_ITER_INTENT);
+
+ ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+
+ return ret;
+}
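+
+/*
+ * A transaction restart surfacing here only records that
+ * bch2_fpunch_at() had to drop locks and retry at some point; the
+ * deletes themselves completed, so at this top level it's treated as
+ * success.
+ */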
+
+/* truncate: */
+
+void bch2_logged_op_truncate_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_logged_op_truncate op = bkey_s_c_to_logged_op_truncate(k);
+
+ prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
+ prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
+ prt_printf(out, " new_i_size=%llu", le64_to_cpu(op.v->new_i_size));
+}
+
+static int truncate_set_isize(struct btree_trans *trans,
+ subvol_inum inum,
+ u64 new_i_size)
+{
+ struct btree_iter iter = { NULL };
+ struct bch_inode_unpacked inode_u;
+ int ret;
+
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT) ?:
+ (inode_u.bi_size = new_i_size, 0) ?:
+ bch2_inode_write(trans, &iter, &inode_u);
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
+ struct bkey_i *op_k,
+ u64 *i_sectors_delta)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter fpunch_iter;
+ struct bkey_i_logged_op_truncate *op = bkey_i_to_logged_op_truncate(op_k);
+ subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
+ u64 new_i_size = le64_to_cpu(op->v.new_i_size);
+ int ret;
+
+ ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ truncate_set_isize(trans, inum, new_i_size));
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
+ POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
+ BTREE_ITER_INTENT);
+ ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
+ bch2_trans_iter_exit(trans, &fpunch_iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ ret = 0;
+err:
+ bch2_logged_op_finish(trans, op_k);
+ return ret;
+}
+
+int bch2_resume_logged_op_truncate(struct btree_trans *trans, struct bkey_i *op_k)
+{
+ return __bch2_resume_logged_op_truncate(trans, op_k, NULL);
+}
+
+int bch2_truncate(struct bch_fs *c, subvol_inum inum, u64 new_i_size, u64 *i_sectors_delta)
+{
+ struct bkey_i_logged_op_truncate op;
+
+ bkey_logged_op_truncate_init(&op.k_i);
+ op.v.subvol = cpu_to_le32(inum.subvol);
+ op.v.inum = cpu_to_le64(inum.inum);
+ op.v.new_i_size = cpu_to_le64(new_i_size);
+
+ /*
+ * Logged ops aren't atomic w.r.t. snapshot creation: creating a
+ * snapshot while they're in progress, then crashing, will result in the
+ * resume only proceeding in one of the snapshots
+ */
+ down_read(&c->snapshot_create_lock);
+ int ret = bch2_trans_run(c,
+ bch2_logged_op_start(trans, &op.k_i) ?:
+ __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta));
+ up_read(&c->snapshot_create_lock);
+
+ return ret;
+}
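+
+/*
+ * Logged-op lifecycle, as used here: bch2_logged_op_start() persists
+ * the op key, so if we crash mid-truncate, recovery finds it and
+ * resumes via bch2_resume_logged_op_truncate(); on completion
+ * bch2_logged_op_finish() deletes the key again. That is what makes
+ * truncate atomic across crashes.
+ */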
+
+/* finsert/fcollapse: */
+
+void bch2_logged_op_finsert_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_logged_op_finsert op = bkey_s_c_to_logged_op_finsert(k);
+
+ prt_printf(out, "subvol=%u", le32_to_cpu(op.v->subvol));
+ prt_printf(out, " inum=%llu", le64_to_cpu(op.v->inum));
+ prt_printf(out, " dst_offset=%lli", le64_to_cpu(op.v->dst_offset));
+ prt_printf(out, " src_offset=%llu", le64_to_cpu(op.v->src_offset));
+}
+
+static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, u64 offset, s64 len)
+{
+ struct btree_iter iter;
+ struct bch_inode_unpacked inode_u;
+ int ret;
+
+ offset <<= 9;
+ len <<= 9;
+
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
+ if (ret)
+ return ret;
+
+ if (len > 0) {
+ if (MAX_LFS_FILESIZE - inode_u.bi_size < len) {
+ ret = -EFBIG;
+ goto err;
+ }
+
+ if (offset >= inode_u.bi_size) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ inode_u.bi_size += len;
+ inode_u.bi_mtime = inode_u.bi_ctime = bch2_current_time(trans->c);
+
+ ret = bch2_inode_write(trans, &iter, &inode_u);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
+ struct bkey_i *op_k,
+ u64 *i_sectors_delta)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
+ subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
+ u64 dst_offset = le64_to_cpu(op->v.dst_offset);
+ u64 src_offset = le64_to_cpu(op->v.src_offset);
+ s64 shift = dst_offset - src_offset;
+ u64 len = abs(shift);
+ u64 pos = le64_to_cpu(op->v.pos);
+ bool insert = shift > 0;
+ int ret = 0;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ POS(inum.inum, 0),
+ BTREE_ITER_INTENT);
+
+ switch (op->v.state) {
+case LOGGED_OP_FINSERT_start:
+ op->v.state = LOGGED_OP_FINSERT_shift_extents;
+
+ if (insert) {
+ ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ adjust_i_size(trans, inum, src_offset, len) ?:
+ bch2_logged_op_update(trans, &op->k_i));
+ if (ret)
+ goto err;
+ } else {
+ bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
+
+ ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto err;
+
+ ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_logged_op_update(trans, &op->k_i));
+ }
+
+ fallthrough;
+case LOGGED_OP_FINSERT_shift_extents:
+ while (1) {
+ struct disk_reservation disk_res =
+ bch2_disk_reservation_init(c, 0);
+ struct bkey_i delete, *copy;
+ struct bkey_s_c k;
+ struct bpos src_pos = POS(inum.inum, src_offset);
+ u32 snapshot;
+
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto btree_err;
+
+ bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
+
+ k = insert
+ ? bch2_btree_iter_peek_prev(&iter)
+ : bch2_btree_iter_peek_upto(&iter, POS(inum.inum, U64_MAX));
+ if ((ret = bkey_err(k)))
+ goto btree_err;
+
+ if (!k.k ||
+ k.k->p.inode != inum.inum ||
+ bkey_le(k.k->p, POS(inum.inum, src_offset)))
+ break;
+
+ copy = bch2_bkey_make_mut_noupdate(trans, k);
+ if ((ret = PTR_ERR_OR_ZERO(copy)))
+ goto btree_err;
+
+ if (insert &&
+ bkey_lt(bkey_start_pos(k.k), src_pos)) {
+ bch2_cut_front(src_pos, copy);
+
+ /* Splitting compressed extent? */
+ bch2_disk_reservation_add(c, &disk_res,
+ copy->k.size *
+ bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy)),
+ BCH_DISK_RESERVATION_NOFAIL);
+ }
+
+ bkey_init(&delete.k);
+ delete.k.p = copy->k.p;
+ delete.k.p.snapshot = snapshot;
+ delete.k.size = copy->k.size;
+
+ copy->k.p.offset += shift;
+ copy->k.p.snapshot = snapshot;
+
+ op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
+
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
+ bch2_logged_op_update(trans, &op->k_i) ?:
+ bch2_trans_commit(trans, &disk_res, NULL, BTREE_INSERT_NOFAIL);
+btree_err:
+ bch2_disk_reservation_put(c, &disk_res);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ goto err;
+
+ pos = le64_to_cpu(op->v.pos);
+ }
+
+ op->v.state = LOGGED_OP_FINSERT_finish;
+
+ if (!insert) {
+ ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ adjust_i_size(trans, inum, src_offset, shift) ?:
+ bch2_logged_op_update(trans, &op->k_i));
+ } else {
+ /* We need an inode update to update bi_journal_seq for fsync: */
+ ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ adjust_i_size(trans, inum, 0, 0) ?:
+ bch2_logged_op_update(trans, &op->k_i));
+ }
+
+ break;
+case LOGGED_OP_FINSERT_finish:
+ break;
+ }
+err:
+ bch2_logged_op_finish(trans, op_k);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_resume_logged_op_finsert(struct btree_trans *trans, struct bkey_i *op_k)
+{
+ return __bch2_resume_logged_op_finsert(trans, op_k, NULL);
+}
+
+int bch2_fcollapse_finsert(struct bch_fs *c, subvol_inum inum,
+ u64 offset, u64 len, bool insert,
+ s64 *i_sectors_delta)
+{
+ struct bkey_i_logged_op_finsert op;
+ s64 shift = insert ? len : -len;
+
+ bkey_logged_op_finsert_init(&op.k_i);
+ op.v.subvol = cpu_to_le32(inum.subvol);
+ op.v.inum = cpu_to_le64(inum.inum);
+ op.v.dst_offset = cpu_to_le64(offset + shift);
+ op.v.src_offset = cpu_to_le64(offset);
+ op.v.pos = cpu_to_le64(insert ? U64_MAX : offset);
+
+ /*
+ * Logged ops aren't atomic w.r.t. snapshot creation: creating a
+ * snapshot while they're in progress, then crashing, will result in the
+ * resume only proceeding in one of the snapshots
+ */
+ down_read(&c->snapshot_create_lock);
+ int ret = bch2_trans_run(c,
+ bch2_logged_op_start(trans, &op.k_i) ?:
+ __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta));
+ up_read(&c->snapshot_create_lock);
+
+ return ret;
+}
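+
+/*
+ * Both fallocate-insert and fcollapse are expressed as a single shift
+ * above: insert moves extents up by len (shift > 0), collapse moves
+ * them down. op->v.pos records how far the shift has progressed -
+ * starting from U64_MAX and walking backwards for insert, from offset
+ * walking forwards for collapse - so a resumed op picks up where it
+ * left off.
+ */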
diff --git a/fs/bcachefs/io_misc.h b/fs/bcachefs/io_misc.h
new file mode 100644
index 000000000000..c9e6ed40e1b8
--- /dev/null
+++ b/fs/bcachefs/io_misc.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_IO_MISC_H
+#define _BCACHEFS_IO_MISC_H
+
+int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
+ unsigned, struct bch_io_opts, s64 *,
+ struct write_point_specifier);
+int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
+ subvol_inum, u64, s64 *);
+int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
+
+void bch2_logged_op_truncate_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_logged_op_truncate ((struct bkey_ops) { \
+ .val_to_text = bch2_logged_op_truncate_to_text, \
+ .min_val_size = 24, \
+})
+
+int bch2_resume_logged_op_truncate(struct btree_trans *, struct bkey_i *);
+
+int bch2_truncate(struct bch_fs *, subvol_inum, u64, u64 *);
+
+void bch2_logged_op_finsert_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_logged_op_finsert ((struct bkey_ops) { \
+ .val_to_text = bch2_logged_op_finsert_to_text, \
+ .min_val_size = 24, \
+})
+
+int bch2_resume_logged_op_finsert(struct btree_trans *, struct bkey_i *);
+
+int bch2_fcollapse_finsert(struct bch_fs *, subvol_inum, u64, u64, bool, s64 *);
+
+#endif /* _BCACHEFS_IO_MISC_H */
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
new file mode 100644
index 000000000000..443c3ea65527
--- /dev/null
+++ b/fs/bcachefs/io_read.c
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Some low level IO code, and hacks for various block layer limitations
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "clock.h"
+#include "compress.h"
+#include "data_update.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "error.h"
+#include "io_read.h"
+#include "io_misc.h"
+#include "io_write.h"
+#include "subvolume.h"
+#include "trace.h"
+
+#include <linux/sched/mm.h>
+
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+
+static bool bch2_target_congested(struct bch_fs *c, u16 target)
+{
+ const struct bch_devs_mask *devs;
+ unsigned d, nr = 0, total = 0;
+ u64 now = local_clock(), last;
+ s64 congested;
+ struct bch_dev *ca;
+
+ if (!target)
+ return false;
+
+ rcu_read_lock();
+ devs = bch2_target_to_mask(c, target) ?:
+ &c->rw_devs[BCH_DATA_user];
+
+ for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
+ ca = rcu_dereference(c->devs[d]);
+ if (!ca)
+ continue;
+
+ congested = atomic_read(&ca->congested);
+ last = READ_ONCE(ca->congested_last);
+ if (time_after64(now, last))
+ congested -= (now - last) >> 12;
+
+ total += max(congested, 0LL);
+ nr++;
+ }
+ rcu_read_unlock();
+
+ return bch2_rand_range(nr * CONGESTED_MAX) < total;
+}
+
+#else
+
+static bool bch2_target_congested(struct bch_fs *c, u16 target)
+{
+ return false;
+}
+
+#endif
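+
+/*
+ * Note that the congestion check is probabilistic: the chance a target
+ * reports congested scales with its devices' decayed congestion
+ * counters, so callers (e.g. should_promote()) back off gradually
+ * rather than hitting a hard cutoff.
+ */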
+
+/* Cache promotion on read */
+
+struct promote_op {
+ struct rcu_head rcu;
+ u64 start_time;
+
+ struct rhash_head hash;
+ struct bpos pos;
+
+ struct data_update write;
+ struct bio_vec bi_inline_vecs[]; /* must be last */
+};
+
+static const struct rhashtable_params bch_promote_params = {
+ .head_offset = offsetof(struct promote_op, hash),
+ .key_offset = offsetof(struct promote_op, pos),
+ .key_len = sizeof(struct bpos),
+};
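+
+/*
+ * In-flight promotes are deduplicated via this hash table, keyed by
+ * extent position: should_promote() fails with
+ * -BCH_ERR_nopromote_in_flight if a promote for the same position
+ * already exists.
+ */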
+
+static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
+ struct bpos pos,
+ struct bch_io_opts opts,
+ unsigned flags)
+{
+ BUG_ON(!opts.promote_target);
+
+ if (!(flags & BCH_READ_MAY_PROMOTE))
+ return -BCH_ERR_nopromote_may_not;
+
+ if (bch2_bkey_has_target(c, k, opts.promote_target))
+ return -BCH_ERR_nopromote_already_promoted;
+
+ if (bkey_extent_is_unwritten(k))
+ return -BCH_ERR_nopromote_unwritten;
+
+ if (bch2_target_congested(c, opts.promote_target))
+ return -BCH_ERR_nopromote_congested;
+
+ if (rhashtable_lookup_fast(&c->promote_table, &pos,
+ bch_promote_params))
+ return -BCH_ERR_nopromote_in_flight;
+
+ return 0;
+}
+
+static void promote_free(struct bch_fs *c, struct promote_op *op)
+{
+ int ret;
+
+ bch2_data_update_exit(&op->write);
+
+ ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
+ bch_promote_params);
+ BUG_ON(ret);
+ bch2_write_ref_put(c, BCH_WRITE_REF_promote);
+ kfree_rcu(op, rcu);
+}
+
+static void promote_done(struct bch_write_op *wop)
+{
+ struct promote_op *op =
+ container_of(wop, struct promote_op, write.op);
+ struct bch_fs *c = op->write.op.c;
+
+ bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
+ op->start_time);
+ promote_free(c, op);
+}
+
+static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
+{
+ struct bio *bio = &op->write.op.wbio.bio;
+
+ trace_and_count(op->write.op.c, read_promote, &rbio->bio);
+
+ /* we now own pages: */
+ BUG_ON(!rbio->bounce);
+ BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);
+
+ memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
+ sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
+ swap(bio->bi_vcnt, rbio->bio.bi_vcnt);
+
+ bch2_data_update_read_done(&op->write, rbio->pick.crc);
+}
+
+static struct promote_op *__promote_alloc(struct btree_trans *trans,
+ enum btree_id btree_id,
+ struct bkey_s_c k,
+ struct bpos pos,
+ struct extent_ptr_decoded *pick,
+ struct bch_io_opts opts,
+ unsigned sectors,
+ struct bch_read_bio **rbio)
+{
+ struct bch_fs *c = trans->c;
+ struct promote_op *op = NULL;
+ struct bio *bio;
+ unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
+ int ret;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
+ return NULL;
+
+ op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
+ if (!op)
+ goto err;
+
+ op->start_time = local_clock();
+ op->pos = pos;
+
+ /*
+ * We don't use the mempool here because extents that aren't
+ * checksummed or compressed can be too big for the mempool:
+ */
+ *rbio = kzalloc(sizeof(struct bch_read_bio) +
+ sizeof(struct bio_vec) * pages,
+ GFP_NOFS);
+ if (!*rbio)
+ goto err;
+
+ rbio_init(&(*rbio)->bio, opts);
+ bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
+
+ if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
+ GFP_NOFS))
+ goto err;
+
+ (*rbio)->bounce = true;
+ (*rbio)->split = true;
+ (*rbio)->kmalloc = true;
+
+ if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
+ bch_promote_params))
+ goto err;
+
+ bio = &op->write.op.wbio.bio;
+ bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
+
+ ret = bch2_data_update_init(trans, NULL, &op->write,
+ writepoint_hashed((unsigned long) current),
+ opts,
+ (struct data_update_opts) {
+ .target = opts.promote_target,
+ .extra_replicas = 1,
+ .write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
+ },
+ btree_id, k);
+ /*
+ * possible errors: -BCH_ERR_nocow_lock_blocked,
+ * -BCH_ERR_ENOSPC_disk_reservation:
+ */
+ if (ret) {
+ ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
+ bch_promote_params);
+ BUG_ON(ret);
+ goto err;
+ }
+
+ op->write.op.end_io = promote_done;
+
+ return op;
+err:
+ if (*rbio)
+ bio_free_pages(&(*rbio)->bio);
+ kfree(*rbio);
+ *rbio = NULL;
+ kfree(op);
+ bch2_write_ref_put(c, BCH_WRITE_REF_promote);
+ return NULL;
+}
+
+noinline
+static struct promote_op *promote_alloc(struct btree_trans *trans,
+ struct bvec_iter iter,
+ struct bkey_s_c k,
+ struct extent_ptr_decoded *pick,
+ struct bch_io_opts opts,
+ unsigned flags,
+ struct bch_read_bio **rbio,
+ bool *bounce,
+ bool *read_full)
+{
+ struct bch_fs *c = trans->c;
+ bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
+ /* data might have to be decompressed in the write path: */
+ unsigned sectors = promote_full
+ ? max(pick->crc.compressed_size, pick->crc.live_size)
+ : bvec_iter_sectors(iter);
+ struct bpos pos = promote_full
+ ? bkey_start_pos(k.k)
+ : POS(k.k->p.inode, iter.bi_sector);
+ struct promote_op *promote;
+ int ret;
+
+ ret = should_promote(c, k, pos, opts, flags);
+ if (ret)
+ goto nopromote;
+
+ promote = __promote_alloc(trans,
+ k.k->type == KEY_TYPE_reflink_v
+ ? BTREE_ID_reflink
+ : BTREE_ID_extents,
+ k, pos, pick, opts, sectors, rbio);
+ if (!promote) {
+ ret = -BCH_ERR_nopromote_enomem;
+ goto nopromote;
+ }
+
+ *bounce = true;
+ *read_full = promote_full;
+ return promote;
+nopromote:
+ trace_read_nopromote(c, ret);
+ return NULL;
+}
+
+/* Read */
+
+#define READ_RETRY_AVOID 1
+#define READ_RETRY 2
+#define READ_ERR 3
+
+enum rbio_context {
+ RBIO_CONTEXT_NULL,
+ RBIO_CONTEXT_HIGHPRI,
+ RBIO_CONTEXT_UNBOUND,
+};
+
+static inline struct bch_read_bio *
+bch2_rbio_parent(struct bch_read_bio *rbio)
+{
+ return rbio->split ? rbio->parent : rbio;
+}
+
+__always_inline
+static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
+ enum rbio_context context,
+ struct workqueue_struct *wq)
+{
+ if (context <= rbio->context) {
+ fn(&rbio->work);
+ } else {
+ rbio->work.func = fn;
+ rbio->context = context;
+ queue_work(wq, &rbio->work);
+ }
+}
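+
+/*
+ * rbio contexts are ordered: if the current context is already
+ * sufficient (context <= rbio->context), the work runs synchronously;
+ * otherwise it's punted to a workqueue providing the required context.
+ */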
+
+static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
+{
+ BUG_ON(rbio->bounce && !rbio->split);
+
+ if (rbio->promote)
+ promote_free(rbio->c, rbio->promote);
+ rbio->promote = NULL;
+
+ if (rbio->bounce)
+ bch2_bio_free_pages_pool(rbio->c, &rbio->bio);
+
+ if (rbio->split) {
+ struct bch_read_bio *parent = rbio->parent;
+
+ if (rbio->kmalloc)
+ kfree(rbio);
+ else
+ bio_put(&rbio->bio);
+
+ rbio = parent;
+ }
+
+ return rbio;
+}
+
+/*
+ * Only called on a top level bch_read_bio to complete an entire read request,
+ * not a split:
+ */
+static void bch2_rbio_done(struct bch_read_bio *rbio)
+{
+ if (rbio->start_time)
+ bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
+ rbio->start_time);
+ bio_endio(&rbio->bio);
+}
+
+static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
+ struct bvec_iter bvec_iter,
+ struct bch_io_failures *failed,
+ unsigned flags)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_buf sk;
+ struct bkey_s_c k;
+ int ret;
+
+ flags &= ~BCH_READ_LAST_FRAGMENT;
+ flags |= BCH_READ_MUST_CLONE;
+
+ bch2_bkey_buf_init(&sk);
+
+ bch2_trans_iter_init(trans, &iter, rbio->data_btree,
+ rbio->read_pos, BTREE_ITER_SLOTS);
+retry:
+ rbio->bio.bi_status = 0;
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ if (bkey_err(k))
+ goto err;
+
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
+ bch2_trans_unlock(trans);
+
+ if (!bch2_bkey_matches_ptr(c, k,
+ rbio->pick.ptr,
+ rbio->data_pos.offset -
+ rbio->pick.crc.offset)) {
+ /* extent we wanted to read no longer exists: */
+ rbio->hole = true;
+ goto out;
+ }
+
+ ret = __bch2_read_extent(trans, rbio, bvec_iter,
+ rbio->read_pos,
+ rbio->data_btree,
+ k, 0, failed, flags);
+ if (ret == READ_RETRY)
+ goto retry;
+ if (ret)
+ goto err;
+out:
+ bch2_rbio_done(rbio);
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&sk, c);
+ return;
+err:
+ rbio->bio.bi_status = BLK_STS_IOERR;
+ goto out;
+}
+
+static void bch2_rbio_retry(struct work_struct *work)
+{
+ struct bch_read_bio *rbio =
+ container_of(work, struct bch_read_bio, work);
+ struct bch_fs *c = rbio->c;
+ struct bvec_iter iter = rbio->bvec_iter;
+ unsigned flags = rbio->flags;
+ subvol_inum inum = {
+ .subvol = rbio->subvol,
+ .inum = rbio->read_pos.inode,
+ };
+ struct bch_io_failures failed = { .nr = 0 };
+
+ trace_and_count(c, read_retry, &rbio->bio);
+
+ if (rbio->retry == READ_RETRY_AVOID)
+ bch2_mark_io_failure(&failed, &rbio->pick);
+
+ rbio->bio.bi_status = 0;
+
+ rbio = bch2_rbio_free(rbio);
+
+ flags |= BCH_READ_IN_RETRY;
+ flags &= ~BCH_READ_MAY_PROMOTE;
+
+ if (flags & BCH_READ_NODECODE) {
+ bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
+ } else {
+ flags &= ~BCH_READ_LAST_FRAGMENT;
+ flags |= BCH_READ_MUST_CLONE;
+
+ __bch2_read(c, rbio, iter, inum, &failed, flags);
+ }
+}
+
+static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
+ blk_status_t error)
+{
+ rbio->retry = retry;
+
+ if (rbio->flags & BCH_READ_IN_RETRY)
+ return;
+
+ if (retry == READ_ERR) {
+ rbio = bch2_rbio_free(rbio);
+
+ rbio->bio.bi_status = error;
+ bch2_rbio_done(rbio);
+ } else {
+ bch2_rbio_punt(rbio, bch2_rbio_retry,
+ RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+ }
+}
+
+static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
+ struct bch_read_bio *rbio)
+{
+ struct bch_fs *c = rbio->c;
+ u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
+ struct bch_extent_crc_unpacked new_crc;
+ struct btree_iter iter;
+ struct bkey_i *new;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ if (crc_is_compressed(rbio->pick.crc))
+ return 0;
+
+ k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ if ((ret = bkey_err(k)))
+ goto out;
+
+ if (bversion_cmp(k.k->version, rbio->version) ||
+ !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
+ goto out;
+
+ /* Extent was merged? */
+ if (bkey_start_offset(k.k) < data_offset ||
+ k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
+ goto out;
+
+ if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
+ rbio->pick.crc, NULL, &new_crc,
+ bkey_start_offset(k.k) - data_offset, k.k->size,
+ rbio->pick.crc.csum_type)) {
+ bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * going to be temporarily appending another checksum entry:
+ */
+ new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
+ sizeof(struct bch_extent_crc128));
+ if ((ret = PTR_ERR_OR_ZERO(new)))
+ goto out;
+
+ bkey_reassemble(new, k);
+
+ if (!bch2_bkey_narrow_crcs(new, new_crc))
+ goto out;
+
+ ret = bch2_trans_update(trans, &iter, new,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
+{
+ bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL,
+ __bch2_rbio_narrow_crcs(trans, rbio));
+}
+
+/* Inner part that may run in process context */
+static void __bch2_read_endio(struct work_struct *work)
+{
+ struct bch_read_bio *rbio =
+ container_of(work, struct bch_read_bio, work);
+ struct bch_fs *c = rbio->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
+ struct bio *src = &rbio->bio;
+ struct bio *dst = &bch2_rbio_parent(rbio)->bio;
+ struct bvec_iter dst_iter = rbio->bvec_iter;
+ struct bch_extent_crc_unpacked crc = rbio->pick.crc;
+ struct nonce nonce = extent_nonce(rbio->version, crc);
+ unsigned nofs_flags;
+ struct bch_csum csum;
+ int ret;
+
+ nofs_flags = memalloc_nofs_save();
+
+ /* Reset iterator for checksumming and copying bounced data: */
+ if (rbio->bounce) {
+ src->bi_iter.bi_size = crc.compressed_size << 9;
+ src->bi_iter.bi_idx = 0;
+ src->bi_iter.bi_bvec_done = 0;
+ } else {
+ src->bi_iter = rbio->bvec_iter;
+ }
+
+ csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
+ if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
+ goto csum_err;
+
+ /*
+ * XXX
+ * We need to rework the narrow_crcs path to deliver the read completion
+ * first, and then punt to a different workqueue, otherwise we're
+ * holding up reads while doing btree updates which is bad for memory
+ * reclaim.
+ */
+ if (unlikely(rbio->narrow_crcs))
+ bch2_rbio_narrow_crcs(rbio);
+
+ if (rbio->flags & BCH_READ_NODECODE)
+ goto nodecode;
+
+ /* Adjust crc to point to subset of data we want: */
+ crc.offset += rbio->offset_into_extent;
+ crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
+
+ if (crc_is_compressed(crc)) {
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
+
+ if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
+ !c->opts.no_data_io)
+ goto decompression_err;
+ } else {
+ /* don't need to decrypt the entire bio: */
+ nonce = nonce_add(nonce, crc.offset << 9);
+ bio_advance(src, crc.offset << 9);
+
+ BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
+ src->bi_iter.bi_size = dst_iter.bi_size;
+
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
+
+ if (rbio->bounce) {
+ struct bvec_iter src_iter = src->bi_iter;
+
+ bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
+ }
+ }
+
+ if (rbio->promote) {
+ /*
+ * Re encrypt data we decrypted, so it's consistent with
+ * rbio->crc:
+ */
+ ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
+ if (ret)
+ goto decrypt_err;
+
+ promote_start(rbio->promote, rbio);
+ rbio->promote = NULL;
+ }
+nodecode:
+ if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
+ rbio = bch2_rbio_free(rbio);
+ bch2_rbio_done(rbio);
+ }
+out:
+ memalloc_nofs_restore(nofs_flags);
+ return;
+csum_err:
+ /*
+ * Checksum error: if the bio wasn't bounced, we may have been
+ * reading into buffers owned by userspace (that userspace can
+ * scribble over) - retry the read, bouncing it this time:
+ */
+ if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
+ rbio->flags |= BCH_READ_MUST_BOUNCE;
+ bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
+ goto out;
+ }
+
+ bch_err_inum_offset_ratelimited(ca,
+ rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
+ "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
+ rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
+ csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
+ bch2_io_error(ca);
+ bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
+ goto out;
+decompression_err:
+ bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
+ "decompression error");
+ bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
+ goto out;
+decrypt_err:
+ bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
+ "decrypt error");
+ bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
+ goto out;
+}
+
+static void bch2_read_endio(struct bio *bio)
+{
+ struct bch_read_bio *rbio =
+ container_of(bio, struct bch_read_bio, bio);
+ struct bch_fs *c = rbio->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
+ struct workqueue_struct *wq = NULL;
+ enum rbio_context context = RBIO_CONTEXT_NULL;
+
+ if (rbio->have_ioref) {
+ bch2_latency_acct(ca, rbio->submit_time, READ);
+ percpu_ref_put(&ca->io_ref);
+ }
+
+ if (!rbio->split)
+ rbio->bio.bi_end_io = rbio->end_io;
+
+ if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
+ rbio->read_pos.inode,
+ rbio->read_pos.offset,
+ "data read error: %s",
+ bch2_blk_status_to_str(bio->bi_status))) {
+ bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
+ return;
+ }
+
+ if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
+ ptr_stale(ca, &rbio->pick.ptr)) {
+ trace_and_count(c, read_reuse_race, &rbio->bio);
+
+ if (rbio->flags & BCH_READ_RETRY_IF_STALE)
+ bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
+ else
+ bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
+ return;
+ }
+
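+ /*
+ * Work that might block or take btree locks (narrowing crcs, promotes,
+ * decompression, decryption) goes to the unbound workqueue; plain
+ * checksum verification can run at high priority:
+ */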
+ if (rbio->narrow_crcs ||
+ rbio->promote ||
+ crc_is_compressed(rbio->pick.crc) ||
+ bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
+ context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
+ else if (rbio->pick.crc.csum_type)
+ context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;
+
+ bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
+}
+
+int __bch2_read_indirect_extent(struct btree_trans *trans,
+ unsigned *offset_into_extent,
+ struct bkey_buf *orig_k)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ u64 reflink_offset;
+ int ret;
+
+ reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
+ *offset_into_extent;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
+ POS(0, reflink_offset), 0);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (k.k->type != KEY_TYPE_reflink_v &&
+ k.k->type != KEY_TYPE_indirect_inline_data) {
+ bch_err_inum_offset_ratelimited(trans->c,
+ orig_k->k->k.p.inode,
+ orig_k->k->k.p.offset << 9,
+ "%llu len %u points to nonexistent indirect extent %llu",
+ orig_k->k->k.p.offset,
+ orig_k->k->k.size,
+ reflink_offset);
+ bch2_inconsistent_error(trans->c);
+ ret = -EIO;
+ goto err;
+ }
+
+ *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
+ bch2_bkey_buf_reassemble(orig_k, trans->c, k);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
+ struct bkey_s_c k,
+ struct bch_extent_ptr ptr)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
+ struct btree_iter iter;
+ struct printbuf buf = PRINTBUF;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+ PTR_BUCKET_POS(c, &ptr),
+ BTREE_ITER_CACHED);
+
+ prt_printf(&buf, "Attempting to read from stale dirty pointer:");
+ printbuf_indent_add(&buf, 2);
+ prt_newline(&buf);
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ if (!ret) {
+ prt_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ }
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+}
+
+int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
+ struct bvec_iter iter, struct bpos read_pos,
+ enum btree_id data_btree, struct bkey_s_c k,
+ unsigned offset_into_extent,
+ struct bch_io_failures *failed, unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct extent_ptr_decoded pick;
+ struct bch_read_bio *rbio = NULL;
+ struct bch_dev *ca = NULL;
+ struct promote_op *promote = NULL;
+ bool bounce = false, read_full = false, narrow_crcs = false;
+ struct bpos data_pos = bkey_start_pos(k.k);
+ int pick_ret;
+
+ if (bkey_extent_is_inline_data(k.k)) {
+ unsigned bytes = min_t(unsigned, iter.bi_size,
+ bkey_inline_data_bytes(k.k));
+
+ swap(iter.bi_size, bytes);
+ memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
+ swap(iter.bi_size, bytes);
+ bio_advance_iter(&orig->bio, &iter, bytes);
+ zero_fill_bio_iter(&orig->bio, iter);
+ goto out_read_done;
+ }
+retry_pick:
+ pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);
+
+ /* hole or reservation - just zero fill: */
+ if (!pick_ret)
+ goto hole;
+
+ if (pick_ret < 0) {
+ bch_err_inum_offset_ratelimited(c,
+ read_pos.inode, read_pos.offset << 9,
+ "no device to read from");
+ goto err;
+ }
+
+ ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+
+ /*
+ * Stale dirty pointers are treated as IO errors, but @failed isn't
+ * allocated unless we're in the retry path - so if we're not in the
+ * retry path, don't check here, it'll be caught in bch2_read_endio()
+ * and we'll end up in the retry path:
+ */
+ if ((flags & BCH_READ_IN_RETRY) &&
+ !pick.ptr.cached &&
+ unlikely(ptr_stale(ca, &pick.ptr))) {
+ read_from_stale_dirty_pointer(trans, k, pick.ptr);
+ bch2_mark_io_failure(failed, &pick);
+ goto retry_pick;
+ }
+
+ /*
+ * Unlock the iterator while the btree node's lock is still in
+ * cache, before doing the IO:
+ */
+ bch2_trans_unlock(trans);
+
+ if (flags & BCH_READ_NODECODE) {
+ /*
+ * can happen if we retry, and the extent we were going to read
+ * has been merged in the meantime:
+ */
+ if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
+ goto hole;
+
+ iter.bi_size = pick.crc.compressed_size << 9;
+ goto get_bio;
+ }
+
+ if (!(flags & BCH_READ_LAST_FRAGMENT) ||
+ bio_flagged(&orig->bio, BIO_CHAIN))
+ flags |= BCH_READ_MUST_CLONE;
+
+ narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
+ bch2_can_narrow_extent_crcs(k, pick.crc);
+
+ if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
+ flags |= BCH_READ_MUST_BOUNCE;
+
+ EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
+
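+ /*
+ * Read (and bounce) the full extent if it's compressed, or if it's
+ * checksummed and we're only reading part of it, reading into
+ * user-mapped buffers, or otherwise forced to bounce:
+ */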
+ if (crc_is_compressed(pick.crc) ||
+ (pick.crc.csum_type != BCH_CSUM_none &&
+ (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
+ (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
+ (flags & BCH_READ_USER_MAPPED)) ||
+ (flags & BCH_READ_MUST_BOUNCE)))) {
+ read_full = true;
+ bounce = true;
+ }
+
+ if (orig->opts.promote_target)
+ promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
+ &rbio, &bounce, &read_full);
+
+ if (!read_full) {
+ EBUG_ON(crc_is_compressed(pick.crc));
+ EBUG_ON(pick.crc.csum_type &&
+ (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
+ bvec_iter_sectors(iter) != pick.crc.live_size ||
+ pick.crc.offset ||
+ offset_into_extent));
+
+ data_pos.offset += offset_into_extent;
+ pick.ptr.offset += pick.crc.offset +
+ offset_into_extent;
+ offset_into_extent = 0;
+ pick.crc.compressed_size = bvec_iter_sectors(iter);
+ pick.crc.uncompressed_size = bvec_iter_sectors(iter);
+ pick.crc.offset = 0;
+ pick.crc.live_size = bvec_iter_sectors(iter);
+ }
+get_bio:
+ if (rbio) {
+ /*
+ * The promote path already allocated a bounce rbio: it needs a
+ * bio big enough for uncompressing data in the write path, but
+ * we're not going to use all of it here:
+ */
+ EBUG_ON(rbio->bio.bi_iter.bi_size <
+ pick.crc.compressed_size << 9);
+ rbio->bio.bi_iter.bi_size =
+ pick.crc.compressed_size << 9;
+ } else if (bounce) {
+ unsigned sectors = pick.crc.compressed_size;
+
+ rbio = rbio_init(bio_alloc_bioset(NULL,
+ DIV_ROUND_UP(sectors, PAGE_SECTORS),
+ 0,
+ GFP_NOFS,
+ &c->bio_read_split),
+ orig->opts);
+
+ bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
+ rbio->bounce = true;
+ rbio->split = true;
+ } else if (flags & BCH_READ_MUST_CLONE) {
+ /*
+ * Have to clone if there were any splits, due to error
+ * reporting issues: if a split errored and retrying didn't
+ * work, then when it reports the error to its parent (us) we
+ * don't know whether the error was from our bio - in which case
+ * we should retry - or from the whole bio, in which case
+ * retrying would lose the error.
+ */
+ rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
+ &c->bio_read_split),
+ orig->opts);
+ rbio->bio.bi_iter = iter;
+ rbio->split = true;
+ } else {
+ rbio = orig;
+ rbio->bio.bi_iter = iter;
+ EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
+ }
+
+ EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);
+
+ rbio->c = c;
+ rbio->submit_time = local_clock();
+ if (rbio->split)
+ rbio->parent = orig;
+ else
+ rbio->end_io = orig->bio.bi_end_io;
+ rbio->bvec_iter = iter;
+ rbio->offset_into_extent = offset_into_extent;
+ rbio->flags = flags;
+ rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
+ rbio->narrow_crcs = narrow_crcs;
+ rbio->hole = 0;
+ rbio->retry = 0;
+ rbio->context = 0;
+ /* XXX: only initialize this if needed */
+ rbio->devs_have = bch2_bkey_devs(k);
+ rbio->pick = pick;
+ rbio->subvol = orig->subvol;
+ rbio->read_pos = read_pos;
+ rbio->data_btree = data_btree;
+ rbio->data_pos = data_pos;
+ rbio->version = k.k->version;
+ rbio->promote = promote;
+ INIT_WORK(&rbio->work, NULL);
+
+ rbio->bio.bi_opf = orig->bio.bi_opf;
+ rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
+ rbio->bio.bi_end_io = bch2_read_endio;
+
+ if (rbio->bounce)
+ trace_and_count(c, read_bounce, &rbio->bio);
+
+ this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
+ bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
+
+ /*
+ * If it's being moved internally, we don't want to flag it as a cache
+ * hit:
+ */
+ if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
+ bch2_bucket_io_time_reset(trans, pick.ptr.dev,
+ PTR_BUCKET_NR(ca, &pick.ptr), READ);
+
+ if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
+ bio_inc_remaining(&orig->bio);
+ trace_and_count(c, read_split, &orig->bio);
+ }
+
+ if (!rbio->pick.idx) {
+ if (!rbio->have_ioref) {
+ bch_err_inum_offset_ratelimited(c,
+ read_pos.inode,
+ read_pos.offset << 9,
+ "no device to read from");
+ bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
+ goto out;
+ }
+
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
+ bio_sectors(&rbio->bio));
+ bio_set_dev(&rbio->bio, ca->disk_sb.bdev);
+
+ if (unlikely(c->opts.no_data_io)) {
+ if (likely(!(flags & BCH_READ_IN_RETRY)))
+ bio_endio(&rbio->bio);
+ } else {
+ if (likely(!(flags & BCH_READ_IN_RETRY)))
+ submit_bio(&rbio->bio);
+ else
+ submit_bio_wait(&rbio->bio);
+ }
+
+ /*
+ * We just submitted IO which may block; we expect relock fail
+ * events and shouldn't count them:
+ */
+ trans->notrace_relock_fail = true;
+ } else {
+ /* Attempting reconstruct read: */
+ if (bch2_ec_read_extent(c, rbio)) {
+ bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
+ goto out;
+ }
+
+ if (likely(!(flags & BCH_READ_IN_RETRY)))
+ bio_endio(&rbio->bio);
+ }
+out:
+ if (likely(!(flags & BCH_READ_IN_RETRY))) {
+ return 0;
+ } else {
+ int ret;
+
+ rbio->context = RBIO_CONTEXT_UNBOUND;
+ bch2_read_endio(&rbio->bio);
+
+ ret = rbio->retry;
+ rbio = bch2_rbio_free(rbio);
+
+ if (ret == READ_RETRY_AVOID) {
+ bch2_mark_io_failure(failed, &pick);
+ ret = READ_RETRY;
+ }
+
+ if (!ret)
+ goto out_read_done;
+
+ return ret;
+ }
+
+err:
+ if (flags & BCH_READ_IN_RETRY)
+ return READ_ERR;
+
+ orig->bio.bi_status = BLK_STS_IOERR;
+ goto out_read_done;
+
+hole:
+ /*
+ * won't normally happen in the BCH_READ_NODECODE
+ * (bch2_move_extent()) path, but if we retry and the extent we wanted
+ * to read no longer exists, we have to signal that:
+ */
+ if (flags & BCH_READ_NODECODE)
+ orig->hole = true;
+
+ zero_fill_bio_iter(&orig->bio, iter);
+out_read_done:
+ if (flags & BCH_READ_LAST_FRAGMENT)
+ bch2_rbio_done(orig);
+ return 0;
+}
+
+void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
+ struct bvec_iter bvec_iter, subvol_inum inum,
+ struct bch_io_failures *failed, unsigned flags)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_buf sk;
+ struct bkey_s_c k;
+ u32 snapshot;
+ int ret;
+
+ BUG_ON(flags & BCH_READ_NODECODE);
+
+ bch2_bkey_buf_init(&sk);
+retry:
+ bch2_trans_begin(trans);
+ iter = (struct btree_iter) { NULL };
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
+ BTREE_ITER_SLOTS);
+ while (1) {
+ unsigned bytes, sectors, offset_into_extent;
+ enum btree_id data_btree = BTREE_ID_extents;
+
+ /*
+ * read_extent -> io_time_reset may cause a transaction restart
+ * without returning an error, we need to check for that here:
+ */
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ break;
+
+ bch2_btree_iter_set_pos(&iter,
+ POS(inum.inum, bvec_iter.bi_sector));
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ offset_into_extent = iter.pos.offset -
+ bkey_start_offset(k.k);
+ sectors = k.k->size - offset_into_extent;
+
+ bch2_bkey_buf_reassemble(&sk, c, k);
+
+ ret = bch2_read_indirect_extent(trans, &data_btree,
+ &offset_into_extent, &sk);
+ if (ret)
+ break;
+
+ k = bkey_i_to_s_c(sk.k);
+
+ /*
+ * With indirect extents, the amount of data to read is the min
+ * of the original extent and the indirect extent:
+ */
+ sectors = min(sectors, k.k->size - offset_into_extent);
+
+ bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
+ swap(bvec_iter.bi_size, bytes);
+
+ if (bvec_iter.bi_size == bytes)
+ flags |= BCH_READ_LAST_FRAGMENT;
+
+ ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
+ data_btree, k,
+ offset_into_extent, failed, flags);
+ if (ret)
+ break;
+
+ if (flags & BCH_READ_LAST_FRAGMENT)
+ break;
+
+ swap(bvec_iter.bi_size, bytes);
+ bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
+
+ ret = btree_trans_too_many_iters(trans);
+ if (ret)
+ break;
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+ ret == READ_RETRY ||
+ ret == READ_RETRY_AVOID)
+ goto retry;
+
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&sk, c);
+
+ if (ret) {
+ bch_err_inum_offset_ratelimited(c, inum.inum,
+ bvec_iter.bi_sector << 9,
+ "read error %i from btree lookup", ret);
+ rbio->bio.bi_status = BLK_STS_IOERR;
+ bch2_rbio_done(rbio);
+ }
+}
+
+void bch2_fs_io_read_exit(struct bch_fs *c)
+{
+ if (c->promote_table.tbl)
+ rhashtable_destroy(&c->promote_table);
+ bioset_exit(&c->bio_read_split);
+ bioset_exit(&c->bio_read);
+}
+
+int bch2_fs_io_read_init(struct bch_fs *c)
+{
+ if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_bio_read_init;
+
+ if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_bio_read_split_init;
+
+ if (rhashtable_init(&c->promote_table, &bch_promote_params))
+ return -BCH_ERR_ENOMEM_promote_table_init;
+
+ return 0;
+}
diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h
new file mode 100644
index 000000000000..d9c18bb7d403
--- /dev/null
+++ b/fs/bcachefs/io_read.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_IO_READ_H
+#define _BCACHEFS_IO_READ_H
+
+#include "bkey_buf.h"
+
+struct bch_read_bio {
+ struct bch_fs *c;
+ u64 start_time;
+ u64 submit_time;
+
+ /*
+ * Reads will often have to be split, and if the extent being read from
+ * was checksummed or compressed we'll also have to allocate bounce
+ * buffers and copy the data back into the original bio.
+ *
+ * If we didn't have to split, we have to save and restore the original
+ * bi_end_io - @split below indicates which:
+ */
+ union {
+ struct bch_read_bio *parent;
+ bio_end_io_t *end_io;
+ };
+
+ /*
+ * Saved copy of bio->bi_iter, from submission time - allows us to
+ * resubmit on IO error, and also to copy data back to the original bio
+ * when we're bouncing:
+ */
+ struct bvec_iter bvec_iter;
+
+ unsigned offset_into_extent;
+
+ u16 flags;
+ union {
+ struct {
+ u16 bounce:1,
+ split:1,
+ kmalloc:1,
+ have_ioref:1,
+ narrow_crcs:1,
+ hole:1,
+ retry:2,
+ context:2;
+ };
+ u16 _state;
+ };
+
+ struct bch_devs_list devs_have;
+
+ struct extent_ptr_decoded pick;
+
+ /*
+ * pos we read from - different from data_pos for indirect extents:
+ */
+ u32 subvol;
+ struct bpos read_pos;
+
+ /*
+ * start pos of data we read (may not be pos of data we want) - for
+ * promote, narrow extents paths:
+ */
+ enum btree_id data_btree;
+ struct bpos data_pos;
+ struct bversion version;
+
+ struct promote_op *promote;
+
+ struct bch_io_opts opts;
+
+ struct work_struct work;
+
+ struct bio bio;
+};
+
+#define to_rbio(_bio) container_of((_bio), struct bch_read_bio, bio)
+
+struct bch_devs_mask;
+struct cache_promote_op;
+struct extent_ptr_decoded;
+
+int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
+ struct bkey_buf *);
+
+static inline int bch2_read_indirect_extent(struct btree_trans *trans,
+ enum btree_id *data_btree,
+ unsigned *offset_into_extent,
+ struct bkey_buf *k)
+{
+ if (k->k->k.type != KEY_TYPE_reflink_p)
+ return 0;
+
+ *data_btree = BTREE_ID_reflink;
+ return __bch2_read_indirect_extent(trans, offset_into_extent, k);
+}
+
+enum bch_read_flags {
+ BCH_READ_RETRY_IF_STALE = 1 << 0,
+ BCH_READ_MAY_PROMOTE = 1 << 1,
+ BCH_READ_USER_MAPPED = 1 << 2,
+ BCH_READ_NODECODE = 1 << 3,
+ BCH_READ_LAST_FRAGMENT = 1 << 4,
+
+ /* internal: */
+ BCH_READ_MUST_BOUNCE = 1 << 5,
+ BCH_READ_MUST_CLONE = 1 << 6,
+ BCH_READ_IN_RETRY = 1 << 7,
+};
+
+int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
+ struct bvec_iter, struct bpos, enum btree_id,
+ struct bkey_s_c, unsigned,
+ struct bch_io_failures *, unsigned);
+
+static inline void bch2_read_extent(struct btree_trans *trans,
+ struct bch_read_bio *rbio, struct bpos read_pos,
+ enum btree_id data_btree, struct bkey_s_c k,
+ unsigned offset_into_extent, unsigned flags)
+{
+ __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
+ data_btree, k, offset_into_extent, NULL, flags);
+}
+
+void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
+ subvol_inum, struct bch_io_failures *, unsigned flags);
+
+static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
+ subvol_inum inum)
+{
+ struct bch_io_failures failed = { .nr = 0 };
+
+ BUG_ON(rbio->_state);
+
+ rbio->c = c;
+ rbio->start_time = local_clock();
+ rbio->subvol = inum.subvol;
+
+ __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
+ BCH_READ_RETRY_IF_STALE|
+ BCH_READ_MAY_PROMOTE|
+ BCH_READ_USER_MAPPED);
+}
+
+static inline struct bch_read_bio *rbio_init(struct bio *bio,
+ struct bch_io_opts opts)
+{
+ struct bch_read_bio *rbio = to_rbio(bio);
+
+ rbio->_state = 0;
+ rbio->promote = NULL;
+ rbio->opts = opts;
+ return rbio;
+}
+
+void bch2_fs_io_read_exit(struct bch_fs *);
+int bch2_fs_io_read_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_IO_READ_H */
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
new file mode 100644
index 000000000000..6e4f85eb6ec8
--- /dev/null
+++ b/fs/bcachefs/io_write.c
@@ -0,0 +1,1671 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_buf.h"
+#include "bset.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "clock.h"
+#include "compress.h"
+#include "debug.h"
+#include "ec.h"
+#include "error.h"
+#include "extent_update.h"
+#include "inode.h"
+#include "io_write.h"
+#include "journal.h"
+#include "keylist.h"
+#include "move.h"
+#include "nocow_locking.h"
+#include "rebalance.h"
+#include "subvolume.h"
+#include "super.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/blkdev.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#include <linux/sched/mm.h>
+
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+
+static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
+ u64 now, int rw)
+{
+ u64 latency_capable =
+ ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
+ /* ideally we'd be taking into account the device's variance here: */
+ u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
+ s64 latency_over = io_latency - latency_threshold;
+
+ if (latency_threshold && latency_over > 0) {
+ /*
+ * bump up congested by approximately latency_over * 4 /
+ * latency_threshold - we don't need much accuracy here so don't
+ * bother with the divide:
+ */
+ if (atomic_read(&ca->congested) < CONGESTED_MAX)
+ atomic_add(latency_over >>
+ max_t(int, ilog2(latency_threshold) - 2, 0),
+ &ca->congested);
+
+ ca->congested_last = now;
+ } else if (atomic_read(&ca->congested) > 0) {
+ atomic_dec(&ca->congested);
+ }
+}
+
+void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
+{
+ atomic64_t *latency = &ca->cur_latency[rw];
+ u64 now = local_clock();
+ u64 io_latency = time_after64(now, submit_time)
+ ? now - submit_time
+ : 0;
+ u64 old, new, v = atomic64_read(latency);
+
+ do {
+ old = v;
+
+ /*
+ * If the io latency was reasonably close to the current
+ * latency, skip the update and atomic operation - this is the
+ * common case:
+ */
+ if (abs((int) (old - io_latency)) < (old >> 1) &&
+ now & ~(~0U << 5))
+ break;
+
+ new = ewma_add(old, io_latency, 5);
+ } while ((v = atomic64_cmpxchg(latency, old, new)) != old);
+
+ bch2_congested_acct(ca, io_latency, now, rw);
+
+ __bch2_time_stats_update(&ca->io_latency[rw], submit_time, now);
+}
+
+#endif
+
+/* Allocate, free from mempool: */
+
+void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
+{
+ struct bvec_iter_all iter;
+ struct bio_vec *bv;
+
+ bio_for_each_segment_all(bv, bio, iter)
+ if (bv->bv_page != ZERO_PAGE(0))
+ mempool_free(bv->bv_page, &c->bio_bounce_pages);
+ bio->bi_vcnt = 0;
+}
+
+static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
+{
+ struct page *page;
+
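+ /* Try a regular allocation first; fall back to the bounce-page mempool: */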
+ if (likely(!*using_mempool)) {
+ page = alloc_page(GFP_NOFS);
+ if (unlikely(!page)) {
+ mutex_lock(&c->bio_bounce_pages_lock);
+ *using_mempool = true;
+ goto pool_alloc;
+ }
+ } else {
+pool_alloc:
+ page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
+ }
+
+ return page;
+}
+
+void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
+ size_t size)
+{
+ bool using_mempool = false;
+
+ while (size) {
+ struct page *page = __bio_alloc_page_pool(c, &using_mempool);
+ unsigned len = min_t(size_t, PAGE_SIZE, size);
+
+ BUG_ON(!bio_add_page(bio, page, len, 0));
+ size -= len;
+ }
+
+ if (using_mempool)
+ mutex_unlock(&c->bio_bounce_pages_lock);
+}
+
+/* Extent update path: */
+
+int bch2_sum_sector_overwrites(struct btree_trans *trans,
+ struct btree_iter *extent_iter,
+ struct bkey_i *new,
+ bool *usage_increasing,
+ s64 *i_sectors_delta,
+ s64 *disk_sectors_delta)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c old;
+ unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
+ bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
+ int ret = 0;
+
+ *usage_increasing = false;
+ *i_sectors_delta = 0;
+ *disk_sectors_delta = 0;
+
+ bch2_trans_copy_iter(&iter, extent_iter);
+
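+ /* Walk the extents that @new overwrites, totalling up the sector deltas: */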
+ for_each_btree_key_upto_continue_norestart(iter,
+ new->k.p, BTREE_ITER_SLOTS, old, ret) {
+ s64 sectors = min(new->k.p.offset, old.k->p.offset) -
+ max(bkey_start_offset(&new->k),
+ bkey_start_offset(old.k));
+
+ *i_sectors_delta += sectors *
+ (bkey_extent_is_allocation(&new->k) -
+ bkey_extent_is_allocation(old.k));
+
+ *disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
+ *disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
+ ? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
+ : 0;
+
+ if (!*usage_increasing &&
+ (new->k.p.snapshot != old.k->p.snapshot ||
+ new_replicas > bch2_bkey_replicas(c, old) ||
+ (!new_compressed && bch2_bkey_sectors_compressed(old))))
+ *usage_increasing = true;
+
+ if (bkey_ge(old.k->p, new->k.p))
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
+ struct btree_iter *extent_iter,
+ u64 new_i_size,
+ s64 i_sectors_delta)
+{
+ struct btree_iter iter;
+ struct bkey_i *k;
+ struct bkey_i_inode_v3 *inode;
+ unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
+ int ret;
+
+ k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
+ SPOS(0,
+ extent_iter->pos.inode,
+ extent_iter->snapshot),
+ BTREE_ITER_CACHED);
+ ret = PTR_ERR_OR_ZERO(k);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
+ k = bch2_inode_to_v3(trans, k);
+ ret = PTR_ERR_OR_ZERO(k);
+ if (unlikely(ret))
+ goto err;
+ }
+
+ inode = bkey_i_to_inode_v3(k);
+
+ if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+ new_i_size > le64_to_cpu(inode->v.bi_size)) {
+ inode->v.bi_size = cpu_to_le64(new_i_size);
+ inode_update_flags = 0;
+ }
+
+ if (i_sectors_delta) {
+ le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
+ inode_update_flags = 0;
+ }
+
+ if (inode->k.p.snapshot != iter.snapshot) {
+ inode->k.p.snapshot = iter.snapshot;
+ inode_update_flags = 0;
+ }
+
+ ret = bch2_trans_update(trans, &iter, &inode->k_i,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ inode_update_flags);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_extent_update(struct btree_trans *trans,
+ subvol_inum inum,
+ struct btree_iter *iter,
+ struct bkey_i *k,
+ struct disk_reservation *disk_res,
+ u64 new_i_size,
+ s64 *i_sectors_delta_total,
+ bool check_enospc)
+{
+ struct bpos next_pos;
+ bool usage_increasing;
+ s64 i_sectors_delta = 0, disk_sectors_delta = 0;
+ int ret;
+
+ /*
+ * This traverses the iterator without changing iter->path->pos to
+ * search_key() (which is pos + 1 for extents): we want there to be a
+ * path already traversed at iter->pos because
+ * bch2_trans_extent_update() will use it to attempt extent merging
+ */
+ ret = __bch2_btree_iter_traverse(iter);
+ if (ret)
+ return ret;
+
+ ret = bch2_extent_trim_atomic(trans, iter, k);
+ if (ret)
+ return ret;
+
+ next_pos = k->k.p;
+
+ ret = bch2_sum_sector_overwrites(trans, iter, k,
+ &usage_increasing,
+ &i_sectors_delta,
+ &disk_sectors_delta);
+ if (ret)
+ return ret;
+
+ if (disk_res &&
+ disk_sectors_delta > (s64) disk_res->sectors) {
+ ret = bch2_disk_reservation_add(trans->c, disk_res,
+ disk_sectors_delta - disk_res->sectors,
+ !check_enospc || !usage_increasing
+ ? BCH_DISK_RESERVATION_NOFAIL : 0);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Note:
+ * We always have to do an inode update - even when i_size/i_sectors
+ * aren't changing - for fsync to work properly; fsync relies on
+ * inode->bi_journal_seq which is updated by the trigger code:
+ */
+ ret = bch2_extent_update_i_size_sectors(trans, iter,
+ min(k->k.p.offset << 9, new_i_size),
+ i_sectors_delta) ?:
+ bch2_trans_update(trans, iter, k, 0) ?:
+ bch2_trans_commit(trans, disk_res, NULL,
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL);
+ if (unlikely(ret))
+ return ret;
+
+ if (i_sectors_delta_total)
+ *i_sectors_delta_total += i_sectors_delta;
+ bch2_btree_iter_set_pos(iter, next_pos);
+ return 0;
+}
+
+static int bch2_write_index_default(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct bkey_buf sk;
+ struct keylist *keys = &op->insert_keys;
+ struct bkey_i *k = bch2_keylist_front(keys);
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ subvol_inum inum = {
+ .subvol = op->subvol,
+ .inum = k->k.p.inode,
+ };
+ int ret;
+
+ BUG_ON(!inum.subvol);
+
+ bch2_bkey_buf_init(&sk);
+
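+ /* Insert each key on the keylist, one commit at a time, restarting as needed: */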
+ do {
+ bch2_trans_begin(trans);
+
+ k = bch2_keylist_front(keys);
+ bch2_bkey_buf_copy(&sk, c, k);
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
+ &sk.k->k.p.snapshot);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ bkey_start_pos(&sk.k->k),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+ ret = bch2_extent_update(trans, inum, &iter, sk.k,
+ &op->res,
+ op->new_i_size, &op->i_sectors_delta,
+ op->flags & BCH_WRITE_CHECK_ENOSPC);
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+
+ if (bkey_ge(iter.pos, k->k.p))
+ bch2_keylist_pop_front(&op->insert_keys);
+ else
+ bch2_cut_front(iter.pos, k);
+ } while (!bch2_keylist_empty(keys));
+
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&sk, c);
+
+ return ret;
+}
+
+/* Writes */
+
+void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
+ enum bch_data_type type,
+ const struct bkey_i *k,
+ bool nocow)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
+ const struct bch_extent_ptr *ptr;
+ struct bch_write_bio *n;
+ struct bch_dev *ca;
+
+ BUG_ON(c->opts.nochanges);
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ BUG_ON(ptr->dev >= BCH_SB_MEMBERS_MAX ||
+ !c->devs[ptr->dev]);
+
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+
+ if (to_entry(ptr + 1) < ptrs.end) {
+ n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
+ GFP_NOFS, &ca->replica_set));
+
+ n->bio.bi_end_io = wbio->bio.bi_end_io;
+ n->bio.bi_private = wbio->bio.bi_private;
+ n->parent = wbio;
+ n->split = true;
+ n->bounce = false;
+ n->put_bio = true;
+ n->bio.bi_opf = wbio->bio.bi_opf;
+ bio_inc_remaining(&wbio->bio);
+ } else {
+ n = wbio;
+ n->split = false;
+ }
+
+ n->c = c;
+ n->dev = ptr->dev;
+ n->have_ioref = nocow || bch2_dev_get_ioref(ca,
+ type == BCH_DATA_btree ? READ : WRITE);
+ n->nocow = nocow;
+ n->submit_time = local_clock();
+ n->inode_offset = bkey_start_offset(&k->k);
+ n->bio.bi_iter.bi_sector = ptr->offset;
+
+ if (likely(n->have_ioref)) {
+ this_cpu_add(ca->io_done->sectors[WRITE][type],
+ bio_sectors(&n->bio));
+
+ bio_set_dev(&n->bio, ca->disk_sb.bdev);
+
+ if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
+ bio_endio(&n->bio);
+ continue;
+ }
+
+ submit_bio(&n->bio);
+ } else {
+ n->bio.bi_status = BLK_STS_REMOVED;
+ bio_endio(&n->bio);
+ }
+ }
+}
+
+static void __bch2_write(struct bch_write_op *);
+
+static void bch2_write_done(struct closure *cl)
+{
+ struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ struct bch_fs *c = op->c;
+
+ EBUG_ON(op->open_buckets.nr);
+
+ bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
+ bch2_disk_reservation_put(c, &op->res);
+
+ if (!(op->flags & BCH_WRITE_MOVE))
+ bch2_write_ref_put(c, BCH_WRITE_REF_write);
+ bch2_keylist_free(&op->insert_keys, op->inline_keys);
+
+ EBUG_ON(cl->parent);
+ closure_debug_destroy(cl);
+ if (op->end_io)
+ op->end_io(op);
+}
+
+static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
+{
+ struct keylist *keys = &op->insert_keys;
+ struct bch_extent_ptr *ptr;
+ struct bkey_i *src, *dst = keys->keys, *n;
+
+ for (src = keys->keys; src != keys->top; src = n) {
+ n = bkey_next(src);
+
+ if (bkey_extent_is_direct_data(&src->k)) {
+ bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
+ test_bit(ptr->dev, op->failed.d));
+
+ if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
+ return -EIO;
+ }
+
+ if (dst != src)
+ memmove_u64s_down(dst, src, src->k.u64s);
+ dst = bkey_next(dst);
+ }
+
+ keys->top = dst;
+ return 0;
+}
+
+/**
+ * __bch2_write_index - after a write, update index to point to new data
+ * @op: bch_write_op to process
+ */
+static void __bch2_write_index(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct keylist *keys = &op->insert_keys;
+ struct bkey_i *k;
+ unsigned dev;
+ int ret = 0;
+
+ if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
+ ret = bch2_write_drop_io_error_ptrs(op);
+ if (ret)
+ goto err;
+ }
+
+ /*
+ * probably not the ideal place to hook this in, but I don't
+ * particularly want to plumb io_opts all the way through the btree
+ * update stack right now
+ */
+ for_each_keylist_key(keys, k)
+ bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
+
+ if (!bch2_keylist_empty(keys)) {
+ u64 sectors_start = keylist_sectors(keys);
+
+ ret = !(op->flags & BCH_WRITE_MOVE)
+ ? bch2_write_index_default(op)
+ : bch2_data_update_index_update(op);
+
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
+ BUG_ON(keylist_sectors(keys) && !ret);
+
+ op->written += sectors_start - keylist_sectors(keys);
+
+ if (ret && !bch2_err_matches(ret, EROFS)) {
+ struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
+
+ bch_err_inum_offset_ratelimited(c,
+ insert->k.p.inode, insert->k.p.offset << 9,
+ "write error while doing btree update: %s",
+ bch2_err_str(ret));
+ }
+
+ if (ret)
+ goto err;
+ }
+out:
+ /* If a bucket wasn't written, we can't erasure code it: */
+ for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
+ bch2_open_bucket_write_error(c, &op->open_buckets, dev);
+
+ bch2_open_buckets_put(c, &op->open_buckets);
+ return;
+err:
+ keys->top = keys->keys;
+ op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
+ goto out;
+}
+
+static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
+{
+ if (state != wp->state) {
+ u64 now = ktime_get_ns();
+
+ if (wp->last_state_change &&
+ time_after64(now, wp->last_state_change))
+ wp->time[wp->state] += now - wp->last_state_change;
+ wp->state = state;
+ wp->last_state_change = now;
+ }
+}
+
+static inline void wp_update_state(struct write_point *wp, bool running)
+{
+ enum write_point_state state;
+
+ state = running ? WRITE_POINT_running :
+ !list_empty(&wp->writes) ? WRITE_POINT_waiting_io
+ : WRITE_POINT_stopped;
+
+ __wp_update_state(wp, state);
+}
+
+static void bch2_write_index(struct closure *cl)
+{
+ struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ struct write_point *wp = op->wp;
+ struct workqueue_struct *wq = index_update_wq(op);
+ unsigned long flags;
+
+ if ((op->flags & BCH_WRITE_DONE) &&
+ (op->flags & BCH_WRITE_MOVE))
+ bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
+
+ spin_lock_irqsave(&wp->writes_lock, flags);
+ if (wp->state == WRITE_POINT_waiting_io)
+ __wp_update_state(wp, WRITE_POINT_waiting_work);
+ list_add_tail(&op->wp_list, &wp->writes);
+ spin_unlock_irqrestore(&wp->writes_lock, flags);
+
+ queue_work(wq, &wp->index_update_work);
+}
+
+static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
+{
+ op->wp = wp;
+
+ if (wp->state == WRITE_POINT_stopped) {
+ spin_lock_irq(&wp->writes_lock);
+ __wp_update_state(wp, WRITE_POINT_waiting_io);
+ spin_unlock_irq(&wp->writes_lock);
+ }
+}
+
+void bch2_write_point_do_index_updates(struct work_struct *work)
+{
+ struct write_point *wp =
+ container_of(work, struct write_point, index_update_work);
+ struct bch_write_op *op;
+
+ while (1) {
+ spin_lock_irq(&wp->writes_lock);
+ op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list);
+ if (op)
+ list_del(&op->wp_list);
+ wp_update_state(wp, op != NULL);
+ spin_unlock_irq(&wp->writes_lock);
+
+ if (!op)
+ break;
+
+ op->flags |= BCH_WRITE_IN_WORKER;
+
+ __bch2_write_index(op);
+
+ if (!(op->flags & BCH_WRITE_DONE))
+ __bch2_write(op);
+ else
+ bch2_write_done(&op->cl);
+ }
+}
+
+static void bch2_write_endio(struct bio *bio)
+{
+ struct closure *cl = bio->bi_private;
+ struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ struct bch_write_bio *wbio = to_wbio(bio);
+ struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
+ struct bch_fs *c = wbio->c;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+
+ if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status))) {
+ set_bit(wbio->dev, op->failed.d);
+ op->flags |= BCH_WRITE_IO_ERROR;
+ }
+
+ if (wbio->nocow)
+ set_bit(wbio->dev, op->devs_need_flush->d);
+
+ if (wbio->have_ioref) {
+ bch2_latency_acct(ca, wbio->submit_time, WRITE);
+ percpu_ref_put(&ca->io_ref);
+ }
+
+ if (wbio->bounce)
+ bch2_bio_free_pages_pool(c, bio);
+
+ if (wbio->put_bio)
+ bio_put(bio);
+
+ if (parent)
+ bio_endio(&parent->bio);
+ else
+ closure_put(cl);
+}
+
+static void init_append_extent(struct bch_write_op *op,
+ struct write_point *wp,
+ struct bversion version,
+ struct bch_extent_crc_unpacked crc)
+{
+ struct bkey_i_extent *e;
+
+ op->pos.offset += crc.uncompressed_size;
+
+ e = bkey_extent_init(op->insert_keys.top);
+ e->k.p = op->pos;
+ e->k.size = crc.uncompressed_size;
+ e->k.version = version;
+
+ if (crc.csum_type ||
+ crc.compression_type ||
+ crc.nonce)
+ bch2_extent_crc_append(&e->k_i, crc);
+
+ bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
+ op->flags & BCH_WRITE_CACHED);
+
+ bch2_keylist_push(&op->insert_keys);
+}
+
+static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
+ struct write_point *wp,
+ struct bio *src,
+ bool *page_alloc_failed,
+ void *buf)
+{
+ struct bch_write_bio *wbio;
+ struct bio *bio;
+ unsigned output_available =
+ min(wp->sectors_free << 9, src->bi_iter.bi_size);
+ unsigned pages = DIV_ROUND_UP(output_available +
+ (buf
+ ? ((unsigned long) buf & (PAGE_SIZE - 1))
+ : 0), PAGE_SIZE);
+
+ pages = min(pages, BIO_MAX_VECS);
+
+ bio = bio_alloc_bioset(NULL, pages, 0,
+ GFP_NOFS, &c->bio_write);
+ wbio = wbio_init(bio);
+ wbio->put_bio = true;
+ /* copy WRITE_SYNC flag */
+ wbio->bio.bi_opf = src->bi_opf;
+
+ if (buf) {
+ bch2_bio_map(bio, buf, output_available);
+ return bio;
+ }
+
+ wbio->bounce = true;
+
+ /*
+ * We can't use mempool for more than c->sb.encoded_extent_max
+ * worth of pages, but we'd like to allocate more if we can:
+ */
+ bch2_bio_alloc_pages_pool(c, bio,
+ min_t(unsigned, output_available,
+ c->opts.encoded_extent_max));
+
+ if (bio->bi_iter.bi_size < output_available)
+ *page_alloc_failed =
+ bch2_bio_alloc_pages(bio,
+ output_available -
+ bio->bi_iter.bi_size,
+ GFP_NOFS) != 0;
+
+ return bio;
+}
+
+static int bch2_write_rechecksum(struct bch_fs *c,
+ struct bch_write_op *op,
+ unsigned new_csum_type)
+{
+ struct bio *bio = &op->wbio.bio;
+ struct bch_extent_crc_unpacked new_crc;
+ int ret;
+
+ /* bch2_rechecksum_bio() can't encrypt or decrypt data: */
+
+ if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
+ bch2_csum_type_is_encryption(new_csum_type))
+ new_csum_type = op->crc.csum_type;
+
+ ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
+ NULL, &new_crc,
+ op->crc.offset, op->crc.live_size,
+ new_csum_type);
+ if (ret)
+ return ret;
+
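+ /* Narrow the bio to the live region that the new checksum covers: */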
+ bio_advance(bio, op->crc.offset << 9);
+ bio->bi_iter.bi_size = op->crc.live_size << 9;
+ op->crc = new_crc;
+ return 0;
+}
+
+static int bch2_write_decrypt(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct nonce nonce = extent_nonce(op->version, op->crc);
+ struct bch_csum csum;
+ int ret;
+
+ if (!bch2_csum_type_is_encryption(op->crc.csum_type))
+ return 0;
+
+ /*
+ * If we need to decrypt data in the write path, we'll no longer be able
+ * to verify the existing checksum (poly1305 mac, in this case) after
+ * it's decrypted - this is the last point we'll be able to reverify the
+ * checksum:
+ */
+ csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
+ if (bch2_crc_cmp(op->crc.csum, csum))
+ return -EIO;
+
+ ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
+ op->crc.csum_type = 0;
+ op->crc.csum = (struct bch_csum) { 0, 0 };
+ return ret;
+}
+
+static enum prep_encoded_ret {
+ PREP_ENCODED_OK,
+ PREP_ENCODED_ERR,
+ PREP_ENCODED_CHECKSUM_ERR,
+ PREP_ENCODED_DO_WRITE,
+} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
+{
+ struct bch_fs *c = op->c;
+ struct bio *bio = &op->wbio.bio;
+
+ if (!(op->flags & BCH_WRITE_DATA_ENCODED))
+ return PREP_ENCODED_OK;
+
+ BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
+
+ /* Can we just write the entire extent as is? */
+ if (op->crc.uncompressed_size == op->crc.live_size &&
+ op->crc.compressed_size <= wp->sectors_free &&
+ (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
+ op->incompressible)) {
+ if (!crc_is_compressed(op->crc) &&
+ op->csum_type != op->crc.csum_type &&
+ bch2_write_rechecksum(c, op, op->csum_type) &&
+ !c->opts.no_data_io)
+ return PREP_ENCODED_CHECKSUM_ERR;
+
+ return PREP_ENCODED_DO_WRITE;
+ }
+
+ /*
+ * If the data is compressed and we couldn't write the entire extent as
+ * is, we have to decompress it:
+ */
+ if (crc_is_compressed(op->crc)) {
+ struct bch_csum csum;
+
+ if (bch2_write_decrypt(op))
+ return PREP_ENCODED_CHECKSUM_ERR;
+
+ /* Last point we can still verify checksum: */
+ csum = bch2_checksum_bio(c, op->crc.csum_type,
+ extent_nonce(op->version, op->crc),
+ bio);
+ if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
+ return PREP_ENCODED_CHECKSUM_ERR;
+
+ if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
+ return PREP_ENCODED_ERR;
+ }
+
+ /*
+ * No longer have compressed data after this point - data might be
+ * encrypted:
+ */
+
+ /*
+ * If the data is checksummed and we're only writing a subset,
+ * rechecksum and adjust bio to point to currently live data:
+ */
+ if ((op->crc.live_size != op->crc.uncompressed_size ||
+ op->crc.csum_type != op->csum_type) &&
+ bch2_write_rechecksum(c, op, op->csum_type) &&
+ !c->opts.no_data_io)
+ return PREP_ENCODED_CHECKSUM_ERR;
+
+ /*
+ * If we want to compress the data, it has to be decrypted:
+ */
+ if ((op->compression_opt ||
+ bch2_csum_type_is_encryption(op->crc.csum_type) !=
+ bch2_csum_type_is_encryption(op->csum_type)) &&
+ bch2_write_decrypt(op))
+ return PREP_ENCODED_CHECKSUM_ERR;
+
+ return PREP_ENCODED_OK;
+}
+
+static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
+ struct bio **_dst)
+{
+ struct bch_fs *c = op->c;
+ struct bio *src = &op->wbio.bio, *dst = src;
+ struct bvec_iter saved_iter;
+ void *ec_buf;
+ unsigned total_output = 0, total_input = 0;
+ bool bounce = false;
+ bool page_alloc_failed = false;
+ int ret, more = 0;
+
+ BUG_ON(!bio_sectors(src));
+
+ ec_buf = bch2_writepoint_ec_buf(c, wp);
+
+ switch (bch2_write_prep_encoded_data(op, wp)) {
+ case PREP_ENCODED_OK:
+ break;
+ case PREP_ENCODED_ERR:
+ ret = -EIO;
+ goto err;
+ case PREP_ENCODED_CHECKSUM_ERR:
+ goto csum_err;
+ case PREP_ENCODED_DO_WRITE:
+ /* XXX look for bug here */
+ if (ec_buf) {
+ dst = bch2_write_bio_alloc(c, wp, src,
+ &page_alloc_failed,
+ ec_buf);
+ bio_copy_data(dst, src);
+ bounce = true;
+ }
+ init_append_extent(op, wp, op->version, op->crc);
+ goto do_write;
+ }
+
+ if (ec_buf ||
+ op->compression_opt ||
+ (op->csum_type &&
+ !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
+ (bch2_csum_type_is_encryption(op->csum_type) &&
+ !(op->flags & BCH_WRITE_PAGES_OWNED))) {
+ dst = bch2_write_bio_alloc(c, wp, src,
+ &page_alloc_failed,
+ ec_buf);
+ bounce = true;
+ }
+
+ saved_iter = dst->bi_iter;
+
+ do {
+ struct bch_extent_crc_unpacked crc = { 0 };
+ struct bversion version = op->version;
+ size_t dst_len = 0, src_len = 0;
+
+ if (page_alloc_failed &&
+ dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
+ dst->bi_iter.bi_size < c->opts.encoded_extent_max)
+ break;
+
+ BUG_ON(op->compression_opt &&
+ (op->flags & BCH_WRITE_DATA_ENCODED) &&
+ bch2_csum_type_is_encryption(op->crc.csum_type));
+ BUG_ON(op->compression_opt && !bounce);
+
+ crc.compression_type = op->incompressible
+ ? BCH_COMPRESSION_TYPE_incompressible
+ : op->compression_opt
+ ? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
+ op->compression_opt)
+ : 0;
+ if (!crc_is_compressed(crc)) {
+ dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
+ dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);
+
+ if (op->csum_type)
+ dst_len = min_t(unsigned, dst_len,
+ c->opts.encoded_extent_max);
+
+ if (bounce) {
+ swap(dst->bi_iter.bi_size, dst_len);
+ bio_copy_data(dst, src);
+ swap(dst->bi_iter.bi_size, dst_len);
+ }
+
+ src_len = dst_len;
+ }
+
+ BUG_ON(!src_len || !dst_len);
+
+ if (bch2_csum_type_is_encryption(op->csum_type)) {
+ if (bversion_zero(version)) {
+ version.lo = atomic64_inc_return(&c->key_version);
+ } else {
+ crc.nonce = op->nonce;
+ op->nonce += src_len >> 9;
+ }
+ }
+
+ if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
+ !crc_is_compressed(crc) &&
+ bch2_csum_type_is_encryption(op->crc.csum_type) ==
+ bch2_csum_type_is_encryption(op->csum_type)) {
+ u8 compression_type = crc.compression_type;
+ u16 nonce = crc.nonce;
+ /*
+ * Note: when we're using rechecksum(), we need to be
+ * checksumming @src because it has all the data our
+ * existing checksum covers - if we bounced (because we
+ * were trying to compress), @dst will only have the
+ * part of the data the new checksum will cover.
+ *
+ * But normally we want to be checksumming post bounce,
+ * because part of the reason for bouncing is so the
+ * data can't be modified (by userspace) while it's in
+ * flight.
+ */
+ if (bch2_rechecksum_bio(c, src, version, op->crc,
+ &crc, &op->crc,
+ src_len >> 9,
+ bio_sectors(src) - (src_len >> 9),
+ op->csum_type))
+ goto csum_err;
+ /*
+ * bch2_rechecksum_bio() sets compression_type on crc from
+ * op->crc; this isn't always correct, as sometimes we're
+ * changing an extent from uncompressed to incompressible.
+ */
+ crc.compression_type = compression_type;
+ crc.nonce = nonce;
+ } else {
+ if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
+ bch2_rechecksum_bio(c, src, version, op->crc,
+ NULL, &op->crc,
+ src_len >> 9,
+ bio_sectors(src) - (src_len >> 9),
+ op->crc.csum_type))
+ goto csum_err;
+
+ crc.compressed_size = dst_len >> 9;
+ crc.uncompressed_size = src_len >> 9;
+ crc.live_size = src_len >> 9;
+
+ swap(dst->bi_iter.bi_size, dst_len);
+ ret = bch2_encrypt_bio(c, op->csum_type,
+ extent_nonce(version, crc), dst);
+ if (ret)
+ goto err;
+
+ crc.csum = bch2_checksum_bio(c, op->csum_type,
+ extent_nonce(version, crc), dst);
+ crc.csum_type = op->csum_type;
+ swap(dst->bi_iter.bi_size, dst_len);
+ }
+
+ init_append_extent(op, wp, version, crc);
+
+ if (dst != src)
+ bio_advance(dst, dst_len);
+ bio_advance(src, src_len);
+ total_output += dst_len;
+ total_input += src_len;
+ } while (dst->bi_iter.bi_size &&
+ src->bi_iter.bi_size &&
+ wp->sectors_free &&
+ !bch2_keylist_realloc(&op->insert_keys,
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ BKEY_EXTENT_U64s_MAX));
+
+ more = src->bi_iter.bi_size != 0;
+
+ dst->bi_iter = saved_iter;
+
+ if (dst == src && more) {
+ BUG_ON(total_output != total_input);
+
+ dst = bio_split(src, total_input >> 9,
+ GFP_NOFS, &c->bio_write);
+ wbio_init(dst)->put_bio = true;
+ /* copy WRITE_SYNC flag */
+ dst->bi_opf = src->bi_opf;
+ }
+
+ dst->bi_iter.bi_size = total_output;
+do_write:
+ *_dst = dst;
+ return more;
+csum_err:
+ bch_err(c, "error verifying existing checksum while rewriting existing data (memory corruption?)");
+ ret = -EIO;
+err:
+ if (to_wbio(dst)->bounce)
+ bch2_bio_free_pages_pool(c, dst);
+ if (to_wbio(dst)->put_bio)
+ bio_put(dst);
+
+ return ret;
+}
+
+static bool bch2_extent_is_writeable(struct bch_write_op *op,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = op->c;
+ struct bkey_s_c_extent e;
+ struct extent_ptr_decoded p;
+ const union bch_extent_entry *entry;
+ unsigned replicas = 0;
+
+ if (k.k->type != KEY_TYPE_extent)
+ return false;
+
+ e = bkey_s_c_to_extent(k);
+ extent_for_each_ptr_decode(e, p, entry) {
+ if (p.crc.csum_type ||
+ crc_is_compressed(p.crc) ||
+ p.has_ec)
+ return false;
+
+ replicas += bch2_extent_ptr_durability(c, &p);
+ }
+
+ return replicas >= op->opts.data_replicas;
+}
+
+static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ const struct bch_extent_ptr *ptr;
+ struct bkey_i *k;
+
+ for_each_keylist_key(&op->insert_keys, k) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
+
+ bkey_for_each_ptr(ptrs, ptr)
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ PTR_BUCKET_POS(c, ptr),
+ BUCKET_NOCOW_LOCK_UPDATE);
+ }
+}
+
+static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *orig,
+ struct bkey_s_c k,
+ u64 new_i_size)
+{
+ struct bkey_i *new;
+ struct bkey_ptrs ptrs;
+ struct bch_extent_ptr *ptr;
+ int ret;
+
+ if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
+ /* trace this */
+ return 0;
+ }
+
+ new = bch2_bkey_make_mut_noupdate(trans, k);
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
+
+ bch2_cut_front(bkey_start_pos(&orig->k), new);
+ bch2_cut_back(orig->k.p, new);
+
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_for_each_ptr(ptrs, ptr)
+ ptr->unwritten = 0;
+
+ /*
+ * Note that we're not calling bch2_subvol_get_snapshot() in this path -
+ * that was done when we kicked off the write, and here it's important
+ * that we update the extent that we wrote to - even if a snapshot has
+ * since been created. The write is still outstanding, so we're ok
+ * w.r.t. snapshot atomicity:
+ */
+ return bch2_extent_update_i_size_sectors(trans, iter,
+ min(new->k.p.offset << 9, new_i_size), 0) ?:
+ bch2_trans_update(trans, iter, new,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+}
+
+static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_i *orig;
+ struct bkey_s_c k;
+ int ret;
+
+ for_each_keylist_key(&op->insert_keys, orig) {
+ ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
+ bkey_start_pos(&orig->k), orig->k.p,
+ BTREE_ITER_INTENT, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL, ({
+ bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
+ }));
+
+ if (ret && !bch2_err_matches(ret, EROFS)) {
+ struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);
+
+ bch_err_inum_offset_ratelimited(c,
+ insert->k.p.inode, insert->k.p.offset << 9,
+ "write error while doing btree update: %s",
+ bch2_err_str(ret));
+ }
+
+ if (ret) {
+ op->error = ret;
+ break;
+ }
+ }
+
+ bch2_trans_put(trans);
+}
+
+static void __bch2_nocow_write_done(struct bch_write_op *op)
+{
+ bch2_nocow_write_unlock(op);
+
+ if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
+ op->error = -EIO;
+ } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
+ bch2_nocow_write_convert_unwritten(op);
+}
+
+static void bch2_nocow_write_done(struct closure *cl)
+{
+ struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+
+ __bch2_nocow_write_done(op);
+ bch2_write_done(cl);
+}
+
+static void bch2_nocow_write(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_ptrs_c ptrs;
+ const struct bch_extent_ptr *ptr;
+ struct {
+ struct bpos b;
+ unsigned gen;
+ struct nocow_lock_bucket *l;
+ } buckets[BCH_REPLICAS_MAX];
+ unsigned nr_buckets = 0;
+ u32 snapshot;
+ int ret, i;
+
+ if (op->flags & BCH_WRITE_MOVE)
+ return;
+
+ trans = bch2_trans_get(c);
+retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
+ if (unlikely(ret))
+ goto err;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(op->pos.inode, op->pos.offset, snapshot),
+ BTREE_ITER_SLOTS);
+ while (1) {
+ struct bio *bio = &op->wbio.bio;
+
+ nr_buckets = 0;
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ /* fall back to normal cow write path? */
+ if (unlikely(k.k->p.snapshot != snapshot ||
+ !bch2_extent_is_writeable(op, k)))
+ break;
+
+ if (bch2_keylist_realloc(&op->insert_keys,
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ k.k->u64s))
+ break;
+
+ /* Get iorefs before dropping btree locks: */
+ ptrs = bch2_bkey_ptrs_c(k);
+ bkey_for_each_ptr(ptrs, ptr) {
+ buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
+ buckets[nr_buckets].gen = ptr->gen;
+ buckets[nr_buckets].l =
+ bucket_nocow_lock(&c->nocow_locks,
+ bucket_to_u64(buckets[nr_buckets].b));
+
+ prefetch(buckets[nr_buckets].l);
+
+ if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
+ goto err_get_ioref;
+
+ nr_buckets++;
+
+ if (ptr->unwritten)
+ op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
+ }
+
+ /* Unlock before taking nocow locks, doing IO: */
+ bkey_reassemble(op->insert_keys.top, k);
+ bch2_trans_unlock(trans);
+
+ bch2_cut_front(op->pos, op->insert_keys.top);
+ if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
+ bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
+
+ for (i = 0; i < nr_buckets; i++) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
+ struct nocow_lock_bucket *l = buckets[i].l;
+ bool stale;
+
+ __bch2_bucket_nocow_lock(&c->nocow_locks, l,
+ bucket_to_u64(buckets[i].b),
+ BUCKET_NOCOW_LOCK_UPDATE);
+
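+ /*
+ * Now that we hold the nocow lock, recheck the bucket gen - the
+ * bucket may have been reused while we weren't holding btree locks:
+ */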
+ rcu_read_lock();
+ stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
+ rcu_read_unlock();
+
+ if (unlikely(stale))
+ goto err_bucket_stale;
+ }
+
+ bio = &op->wbio.bio;
+ if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
+ bio = bio_split(bio, k.k->p.offset - op->pos.offset,
+ GFP_KERNEL, &c->bio_write);
+ wbio_init(bio)->put_bio = true;
+ bio->bi_opf = op->wbio.bio.bi_opf;
+ } else {
+ op->flags |= BCH_WRITE_DONE;
+ }
+
+ op->pos.offset += bio_sectors(bio);
+ op->written += bio_sectors(bio);
+
+ bio->bi_end_io = bch2_write_endio;
+ bio->bi_private = &op->cl;
+ bio->bi_opf |= REQ_OP_WRITE;
+ closure_get(&op->cl);
+ bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
+ op->insert_keys.top, true);
+
+ bch2_keylist_push(&op->insert_keys);
+ if (op->flags & BCH_WRITE_DONE)
+ break;
+ bch2_btree_iter_advance(&iter);
+ }
+out:
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ if (ret) {
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ op->pos.offset << 9,
+ "%s: btree lookup error %s",
+ __func__, bch2_err_str(ret));
+ op->error = ret;
+ op->flags |= BCH_WRITE_DONE;
+ }
+
+ bch2_trans_put(trans);
+
+ /* fall back to cow write path? */
+ if (!(op->flags & BCH_WRITE_DONE)) {
+ closure_sync(&op->cl);
+ __bch2_nocow_write_done(op);
+ op->insert_keys.top = op->insert_keys.keys;
+ } else if (op->flags & BCH_WRITE_SYNC) {
+ closure_sync(&op->cl);
+ bch2_nocow_write_done(&op->cl);
+ } else {
+ /*
+ * XXX
+ * needs to run out of process context because ei_quota_lock is
+ * a mutex
+ */
+ continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
+ }
+ return;
+err_get_ioref:
+ for (i = 0; i < nr_buckets; i++)
+ percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
+
+ /* Fall back to COW path: */
+ goto out;
+err_bucket_stale:
+ while (i >= 0) {
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ buckets[i].b,
+ BUCKET_NOCOW_LOCK_UPDATE);
+ --i;
+ }
+ for (i = 0; i < nr_buckets; i++)
+ percpu_ref_put(&bch_dev_bkey_exists(c, buckets[i].b.inode)->io_ref);
+
+ /* We can retry this: */
+ ret = -BCH_ERR_transaction_restart;
+ goto out;
+}
+
+static void __bch2_write(struct bch_write_op *op)
+{
+ struct bch_fs *c = op->c;
+ struct write_point *wp = NULL;
+ struct bio *bio = NULL;
+ unsigned nofs_flags;
+ int ret;
+
+ nofs_flags = memalloc_nofs_save();
+
+ if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
+ bch2_nocow_write(op);
+ if (op->flags & BCH_WRITE_DONE)
+ goto out_nofs_restore;
+ }
+again:
+ memset(&op->failed, 0, sizeof(op->failed));
+
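+ /*
+ * Each iteration allocates space from a write point, writes as much
+ * data as fits, and submits the IO; loop until the entire write has
+ * been issued:
+ */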
+ do {
+ struct bkey_i *key_to_write;
+ unsigned key_to_write_offset = op->insert_keys.top_p -
+ op->insert_keys.keys_p;
+
+ /* +1 for possible cache device: */
+ if (op->open_buckets.nr + op->nr_replicas + 1 >
+ ARRAY_SIZE(op->open_buckets.v))
+ break;
+
+ if (bch2_keylist_realloc(&op->insert_keys,
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ BKEY_EXTENT_U64s_MAX))
+ break;
+
+ /*
+ * The copygc thread is now global, which means it's no longer
+ * freeing up space on specific disks - so allocations for
+ * specific disks may hang arbitrarily long:
+ */
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_alloc_sectors_start_trans(trans,
+ op->target,
+ op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
+ op->write_point,
+ &op->devs_have,
+ op->nr_replicas,
+ op->nr_replicas_required,
+ op->watermark,
+ op->flags,
+ (op->flags & (BCH_WRITE_ALLOC_NOWAIT|
+ BCH_WRITE_ONLY_SPECIFIED_DEVS))
+ ? NULL : &op->cl, &wp));
+ if (unlikely(ret)) {
+ if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
+ break;
+
+ goto err;
+ }
+
+ EBUG_ON(!wp);
+
+ bch2_open_bucket_get(c, wp, &op->open_buckets);
+ ret = bch2_write_extent(op, wp, &bio);
+
+ bch2_alloc_sectors_done_inlined(c, wp);
+err:
+ if (ret <= 0) {
+ op->flags |= BCH_WRITE_DONE;
+
+ if (ret < 0) {
+ op->error = ret;
+ break;
+ }
+ }
+
+ bio->bi_end_io = bch2_write_endio;
+ bio->bi_private = &op->cl;
+ bio->bi_opf |= REQ_OP_WRITE;
+
+ closure_get(bio->bi_private);
+
+ key_to_write = (void *) (op->insert_keys.keys_p +
+ key_to_write_offset);
+
+ bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
+ key_to_write, false);
+ } while (ret);
+
+ /*
+ * Sync or no?
+ *
+ * If we're running asynchronously, we may still want to block
+ * synchronously here if we weren't able to submit all of the IO at
+ * once, as that signals backpressure to the caller.
+ */
+ if ((op->flags & BCH_WRITE_SYNC) ||
+ (!(op->flags & BCH_WRITE_DONE) &&
+ !(op->flags & BCH_WRITE_IN_WORKER))) {
+ closure_sync(&op->cl);
+ __bch2_write_index(op);
+
+ if (!(op->flags & BCH_WRITE_DONE))
+ goto again;
+ bch2_write_done(&op->cl);
+ } else {
+ bch2_write_queue(op, wp);
+ continue_at(&op->cl, bch2_write_index, NULL);
+ }
+out_nofs_restore:
+ memalloc_nofs_restore(nofs_flags);
+}
+
+static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
+{
+ struct bio *bio = &op->wbio.bio;
+ struct bvec_iter iter;
+ struct bkey_i_inline_data *id;
+ unsigned sectors;
+ int ret;
+
+ op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
+ op->flags |= BCH_WRITE_DONE;
+
+ bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);
+
+ ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
+ ARRAY_SIZE(op->inline_keys),
+ BKEY_U64s + DIV_ROUND_UP(data_len, 8));
+ if (ret) {
+ op->error = ret;
+ goto err;
+ }
+
+ sectors = bio_sectors(bio);
+ op->pos.offset += sectors;
+
+ id = bkey_inline_data_init(op->insert_keys.top);
+ id->k.p = op->pos;
+ id->k.version = op->version;
+ id->k.size = sectors;
+
+ iter = bio->bi_iter;
+ iter.bi_size = data_len;
+ memcpy_from_bio(id->v.data, bio, iter);
+
+ while (data_len & 7)
+ id->v.data[data_len++] = '\0';
+ set_bkey_val_bytes(&id->k, data_len);
+ bch2_keylist_push(&op->insert_keys);
+
+ __bch2_write_index(op);
+err:
+ bch2_write_done(&op->cl);
+}
+
+/**
+ * bch2_write() - handle a write to a cache device or flash only volume
+ * @cl: &bch_write_op->cl
+ *
+ * This is the starting point for any data to end up in a cache device; it could
+ * be from a normal write, or a writeback write, or a write to a flash only
+ * volume - it's also used by the moving garbage collector to compact data in
+ * mostly empty buckets.
+ *
+ * It first writes the data to the cache, creating a list of keys to be inserted
+ * (if the data won't fit in a single open bucket, there will be multiple keys);
+ * after the data is written it calls bch_journal, and after the keys have been
+ * added to the next journal write they're inserted into the btree.
+ *
+ * If op->discard is true, instead of inserting the data it invalidates the
+ * region of the cache represented by op->bio and op->inode.
+ */
+void bch2_write(struct closure *cl)
+{
+ struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
+ struct bio *bio = &op->wbio.bio;
+ struct bch_fs *c = op->c;
+ unsigned data_len;
+
+ EBUG_ON(op->cl.parent);
+ BUG_ON(!op->nr_replicas);
+ BUG_ON(!op->write_point.v);
+ BUG_ON(bkey_eq(op->pos, POS_MAX));
+
+ op->start_time = local_clock();
+ bch2_keylist_init(&op->insert_keys, op->inline_keys);
+ wbio_init(bio)->put_bio = false;
+
+ if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ op->pos.offset << 9,
+ "misaligned write");
+ op->error = -EIO;
+ goto err;
+ }
+
+ if (c->opts.nochanges) {
+ op->error = -BCH_ERR_erofs_no_writes;
+ goto err;
+ }
+
+ if (!(op->flags & BCH_WRITE_MOVE) &&
+ !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
+ op->error = -BCH_ERR_erofs_no_writes;
+ goto err;
+ }
+
+ this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
+ bch2_increment_clock(c, bio_sectors(bio), WRITE);
+
+ data_len = min_t(u64, bio->bi_iter.bi_size,
+ op->new_i_size - (op->pos.offset << 9));
+
+ if (c->opts.inline_data &&
+ data_len <= min(block_bytes(c) / 2, 1024U)) {
+ bch2_write_data_inline(op, data_len);
+ return;
+ }
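+ /*
+ * (With 4k blocks the inline cutoff above is min(2048, 1024) = 1024 bytes;
+ * with 512 byte blocks it drops to 256.)
+ */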
+
+ __bch2_write(op);
+ return;
+err:
+ bch2_disk_reservation_put(c, &op->res);
+
+ closure_debug_destroy(&op->cl);
+ if (op->end_io)
+ op->end_io(op);
+}
+
+static const char * const bch2_write_flags[] = {
+#define x(f) #f,
+ BCH_WRITE_FLAGS()
+#undef x
+ NULL
+};
+
+void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
+{
+ prt_str(out, "pos: ");
+ bch2_bpos_to_text(out, op->pos);
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_str(out, "started: ");
+ bch2_pr_time_units(out, local_clock() - op->start_time);
+ prt_newline(out);
+
+ prt_str(out, "flags: ");
+ prt_bitflags(out, bch2_write_flags, op->flags);
+ prt_newline(out);
+
+ prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
+ prt_newline(out);
+
+ printbuf_indent_sub(out, 2);
+}
+
+void bch2_fs_io_write_exit(struct bch_fs *c)
+{
+ mempool_exit(&c->bio_bounce_pages);
+ bioset_exit(&c->bio_write);
+}
+
+int bch2_fs_io_write_init(struct bch_fs *c)
+{
+ if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
+ BIOSET_NEED_BVECS))
+ return -BCH_ERR_ENOMEM_bio_write_init;
+
+ if (mempool_init_page_pool(&c->bio_bounce_pages,
+ max_t(unsigned,
+ c->opts.btree_node_size,
+ c->opts.encoded_extent_max) /
+ PAGE_SIZE, 0))
+ return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
+
+ return 0;
+}
diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h
new file mode 100644
index 000000000000..9323167229ee
--- /dev/null
+++ b/fs/bcachefs/io_write.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_IO_WRITE_H
+#define _BCACHEFS_IO_WRITE_H
+
+#include "checksum.h"
+#include "io_write_types.h"
+
+#define to_wbio(_bio) \
+ container_of((_bio), struct bch_write_bio, bio)
+
+void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
+void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
+
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+void bch2_latency_acct(struct bch_dev *, u64, int);
+#else
+static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
+#endif
+
+void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
+ enum bch_data_type, const struct bkey_i *, bool);
+
+#define BCH_WRITE_FLAGS() \
+ x(ALLOC_NOWAIT) \
+ x(CACHED) \
+ x(DATA_ENCODED) \
+ x(PAGES_STABLE) \
+ x(PAGES_OWNED) \
+ x(ONLY_SPECIFIED_DEVS) \
+ x(WROTE_DATA_INLINE) \
+ x(FROM_INTERNAL) \
+ x(CHECK_ENOSPC) \
+ x(SYNC) \
+ x(MOVE) \
+ x(IN_WORKER) \
+ x(DONE) \
+ x(IO_ERROR) \
+ x(CONVERT_UNWRITTEN)
+
+enum __bch_write_flags {
+#define x(f) __BCH_WRITE_##f,
+ BCH_WRITE_FLAGS()
+#undef x
+};
+
+enum bch_write_flags {
+#define x(f) BCH_WRITE_##f = BIT(__BCH_WRITE_##f),
+ BCH_WRITE_FLAGS()
+#undef x
+};
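+/*
+ * Illustrative expansion (assuming the flag order above): the x-macro
+ * generates paired definitions such as
+ *
+ *   __BCH_WRITE_SYNC = 9,
+ *   BCH_WRITE_SYNC   = BIT(__BCH_WRITE_SYNC),
+ *
+ * so bch2_write_flags[] in io_write.c can be indexed by bit position for
+ * prt_bitflags(), while callers test op->flags against the BIT() masks.
+ */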
+
+static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
+{
+ return op->watermark == BCH_WATERMARK_copygc
+ ? op->c->copygc_wq
+ : op->c->btree_update_wq;
+}
+
+int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
+ struct bkey_i *, bool *, s64 *, s64 *);
+int bch2_extent_update(struct btree_trans *, subvol_inum,
+ struct btree_iter *, struct bkey_i *,
+ struct disk_reservation *, u64, s64 *, bool);
+
+static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
+ struct bch_io_opts opts)
+{
+ op->c = c;
+ op->end_io = NULL;
+ op->flags = 0;
+ op->written = 0;
+ op->error = 0;
+ op->csum_type = bch2_data_checksum_type(c, opts);
+ op->compression_opt = opts.compression;
+ op->nr_replicas = 0;
+ op->nr_replicas_required = c->opts.data_replicas_required;
+ op->watermark = BCH_WATERMARK_normal;
+ op->incompressible = 0;
+ op->open_buckets.nr = 0;
+ op->devs_have.nr = 0;
+ op->target = 0;
+ op->opts = opts;
+ op->subvol = 0;
+ op->pos = POS_MAX;
+ op->version = ZERO_VERSION;
+ op->write_point = (struct write_point_specifier) { 0 };
+ op->res = (struct disk_reservation) { 0 };
+ op->new_i_size = U64_MAX;
+ op->i_sectors_delta = 0;
+ op->devs_need_flush = NULL;
+}
+
+void bch2_write(struct closure *);
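+/*
+ * A minimal usage sketch - my_end_io and parent_cl are hypothetical, not part
+ * of this header. Callers fill in a bch_write_op and kick off the write via
+ * the embedded closure:
+ *
+ *   struct bch_write_op op;
+ *
+ *   bch2_write_op_init(&op, c, opts);
+ *   op.pos         = POS(inum, sector);
+ *   op.nr_replicas = opts.data_replicas;
+ *   op.end_io      = my_end_io;
+ *   closure_call(&op.cl, bch2_write, NULL, &parent_cl);
+ */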
+
+void bch2_write_point_do_index_updates(struct work_struct *);
+
+static inline struct bch_write_bio *wbio_init(struct bio *bio)
+{
+ struct bch_write_bio *wbio = to_wbio(bio);
+
+ memset(&wbio->wbio, 0, sizeof(wbio->wbio));
+ return wbio;
+}
+
+void bch2_write_op_to_text(struct printbuf *, struct bch_write_op *);
+
+void bch2_fs_io_write_exit(struct bch_fs *);
+int bch2_fs_io_write_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_IO_WRITE_H */
diff --git a/fs/bcachefs/io_write_types.h b/fs/bcachefs/io_write_types.h
new file mode 100644
index 000000000000..c7f97c2c4805
--- /dev/null
+++ b/fs/bcachefs/io_write_types.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_IO_WRITE_TYPES_H
+#define _BCACHEFS_IO_WRITE_TYPES_H
+
+#include "alloc_types.h"
+#include "btree_types.h"
+#include "buckets_types.h"
+#include "extents_types.h"
+#include "keylist_types.h"
+#include "opts.h"
+#include "super_types.h"
+
+#include <linux/llist.h>
+#include <linux/workqueue.h>
+
+struct bch_write_bio {
+ struct_group(wbio,
+ struct bch_fs *c;
+ struct bch_write_bio *parent;
+
+ u64 submit_time;
+ u64 inode_offset;
+
+ struct bch_devs_list failed;
+ u8 dev;
+
+ unsigned split:1,
+ bounce:1,
+ put_bio:1,
+ have_ioref:1,
+ nocow:1,
+ used_mempool:1,
+ first_btree_write:1;
+ );
+
+ struct bio bio;
+};
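+/*
+ * A note on the struct_group() above: it lets wbio_init() (in io_write.h)
+ * zero just these header fields with a single memset of the wbio group,
+ * while leaving the embedded struct bio - owned by the block layer - intact.
+ */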
+
+struct bch_write_op {
+ struct closure cl;
+ struct bch_fs *c;
+ void (*end_io)(struct bch_write_op *);
+ u64 start_time;
+
+ unsigned written; /* sectors */
+ u16 flags;
+ s16 error; /* dio write path expects it to hold -ERESTARTSYS... */
+
+ unsigned compression_opt:8;
+ unsigned csum_type:4;
+ unsigned nr_replicas:4;
+ unsigned nr_replicas_required:4;
+ unsigned watermark:3;
+ unsigned incompressible:1;
+ unsigned stripe_waited:1;
+
+ struct bch_devs_list devs_have;
+ u16 target;
+ u16 nonce;
+ struct bch_io_opts opts;
+
+ u32 subvol;
+ struct bpos pos;
+ struct bversion version;
+
+ /* For BCH_WRITE_DATA_ENCODED: */
+ struct bch_extent_crc_unpacked crc;
+
+ struct write_point_specifier write_point;
+
+ struct write_point *wp;
+ struct list_head wp_list;
+
+ struct disk_reservation res;
+
+ struct open_buckets open_buckets;
+
+ u64 new_i_size;
+ s64 i_sectors_delta;
+
+ struct bch_devs_mask failed;
+
+ struct keylist insert_keys;
+ u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2];
+
+ /*
+ * Bitmask of devices that have had nocow writes issued to them since
+ * last flush:
+ */
+ struct bch_devs_mask *devs_need_flush;
+
+ /* Must be last: */
+ struct bch_write_bio wbio;
+};
+
+#endif /* _BCACHEFS_IO_WRITE_TYPES_H */
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
new file mode 100644
index 000000000000..0e7a9ffa3671
--- /dev/null
+++ b/fs/bcachefs/journal.c
@@ -0,0 +1,1449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcachefs journalling code, for btree insertions
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "bkey_methods.h"
+#include "btree_gc.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_io.h"
+#include "journal_reclaim.h"
+#include "journal_sb.h"
+#include "journal_seq_blacklist.h"
+#include "trace.h"
+
+static const char * const bch2_journal_errors[] = {
+#define x(n) #n,
+ JOURNAL_ERRORS()
+#undef x
+ NULL
+};
+
+static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
+{
+ return seq > j->seq_ondisk;
+}
+
+static bool __journal_entry_is_open(union journal_res_state state)
+{
+ return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
+}
+
+static inline unsigned nr_unwritten_journal_entries(struct journal *j)
+{
+ return atomic64_read(&j->seq) - j->seq_ondisk;
+}
+
+static bool journal_entry_is_open(struct journal *j)
+{
+ return __journal_entry_is_open(j->reservations);
+}
+
+static inline struct journal_buf *
+journal_seq_to_buf(struct journal *j, u64 seq)
+{
+ struct journal_buf *buf = NULL;
+
+ EBUG_ON(seq > journal_cur_seq(j));
+
+ if (journal_seq_unwritten(j, seq)) {
+ buf = j->buf + (seq & JOURNAL_BUF_MASK);
+ EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
+ }
+ return buf;
+}
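+/*
+ * (Sequence numbers map onto the small ring of in-memory buffers by masking:
+ * with four buffers - as the buf0..buf3 refcounts in journal_res_state
+ * suggest - seq & JOURNAL_BUF_MASK picks buf[seq % 4], so e.g. seq 17 lands
+ * in j->buf[1].)
+ */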
+
+static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(p->list); i++)
+ INIT_LIST_HEAD(&p->list[i]);
+ INIT_LIST_HEAD(&p->flushed);
+ atomic_set(&p->count, count);
+ p->devs.nr = 0;
+}
+
+/*
+ * Detect stuck journal conditions and trigger shutdown. Technically the journal
+ * can end up stuck for a variety of reasons, such as a blocked I/O, journal
+ * reservation lockup, etc. Since this is a fatal error with potentially
+ * unpredictable characteristics, we want to be fairly conservative before we
+ * decide to shut things down.
+ *
+ * Consider the journal stuck when it appears full with no ability to commit
+ * btree transactions, discard journal buckets, or acquire a priority
+ * (reserved watermark) reservation.
+ */
+static inline bool
+journal_error_check_stuck(struct journal *j, int error, unsigned flags)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bool stuck = false;
+ struct printbuf buf = PRINTBUF;
+
+ if (!(error == JOURNAL_ERR_journal_full ||
+ error == JOURNAL_ERR_journal_pin_full) ||
+ nr_unwritten_journal_entries(j) ||
+ (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
+ return stuck;
+
+ spin_lock(&j->lock);
+
+ if (j->can_discard) {
+ spin_unlock(&j->lock);
+ return stuck;
+ }
+
+ stuck = true;
+
+ /*
+ * The journal shutdown path will set ->err_seq, but do it here first to
+ * serialize against concurrent failures and avoid duplicate error
+ * reports.
+ */
+ if (j->err_seq) {
+ spin_unlock(&j->lock);
+ return stuck;
+ }
+ j->err_seq = journal_cur_seq(j);
+ spin_unlock(&j->lock);
+
+ bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
+ bch2_journal_errors[error]);
+ bch2_journal_debug_to_text(&buf, j);
+ bch_err(c, "%s", buf.buf);
+
+ printbuf_reset(&buf);
+ bch2_journal_pins_to_text(&buf, j);
+ bch_err(c, "Journal pins:\n%s", buf.buf);
+ printbuf_exit(&buf);
+
+ bch2_fatal_error(c);
+ dump_stack();
+
+ return stuck;
+}
+
+/*
+ * Final processing when the last reference of a journal buffer has been
+ * dropped. Drop the pin list reference acquired at journal entry open and write
+ * the buffer, if requested.
+ */
+void bch2_journal_buf_put_final(struct journal *j, u64 seq, bool write)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+ lockdep_assert_held(&j->lock);
+
+ if (__bch2_journal_pin_put(j, seq))
+ bch2_journal_reclaim_fast(j);
+ if (write)
+ closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
+}
+
+/*
+ * Close the current journal entry, if open:
+ *
+ * We don't close a journal_buf until the next journal_buf is finished writing,
+ * and can be opened again - this also initializes the next journal_buf:
+ */
+static void __journal_entry_close(struct journal *j, unsigned closed_val)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_buf *buf = journal_cur_buf(j);
+ union journal_res_state old, new;
+ u64 v = atomic64_read(&j->reservations.counter);
+ unsigned sectors;
+
+ BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
+ closed_val != JOURNAL_ENTRY_ERROR_VAL);
+
+ lockdep_assert_held(&j->lock);
+
+ do {
+ old.v = new.v = v;
+ new.cur_entry_offset = closed_val;
+
+ if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
+ old.cur_entry_offset == new.cur_entry_offset)
+ return;
+ } while ((v = atomic64_cmpxchg(&j->reservations.counter,
+ old.v, new.v)) != old.v);
+
+ if (!__journal_entry_is_open(old))
+ return;
+
+ /* Close out old buffer: */
+ buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
+
+ sectors = vstruct_blocks_plus(buf->data, c->block_bits,
+ buf->u64s_reserved) << c->block_bits;
+ BUG_ON(sectors > buf->sectors);
+ buf->sectors = sectors;
+
+ /*
+ * We have to set last_seq here, _before_ opening a new journal entry:
+ *
+ * A thread may replace an old pin with a new pin on its current
+ * journal reservation - the expectation being that the journal will
+ * contain either what the old pin protected or what the new pin
+ * protects.
+ *
+ * After the old pin is dropped journal_last_seq() won't include the old
+ * pin, so we can only write the updated last_seq on the entry that
+ * contains whatever the new pin protects.
+ *
+ * Restated, we can _not_ update last_seq for a given entry if there
+ * could be a newer entry open with reservations/pins that have been
+ * taken against it.
+ *
+ * Hence, we want to update/set last_seq on the current journal entry right
+ * before we open a new one:
+ */
+ buf->last_seq = journal_last_seq(j);
+ buf->data->last_seq = cpu_to_le64(buf->last_seq);
+ BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
+
+ cancel_delayed_work(&j->write_work);
+
+ bch2_journal_space_available(j);
+
+ __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
+}
+
+void bch2_journal_halt(struct journal *j)
+{
+ spin_lock(&j->lock);
+ __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
+ if (!j->err_seq)
+ j->err_seq = journal_cur_seq(j);
+ journal_wake(j);
+ spin_unlock(&j->lock);
+}
+
+static bool journal_entry_want_write(struct journal *j)
+{
+ bool ret = !journal_entry_is_open(j) ||
+ journal_cur_seq(j) == journal_last_unwritten_seq(j);
+
+ /* Don't close it yet if we already have a write in flight: */
+ if (ret)
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ else if (nr_unwritten_journal_entries(j)) {
+ struct journal_buf *buf = journal_cur_buf(j);
+
+ if (!buf->flush_time) {
+ buf->flush_time = local_clock() ?: 1;
+ buf->expires = jiffies;
+ }
+ }
+
+ return ret;
+}
+
+static bool journal_entry_close(struct journal *j)
+{
+ bool ret;
+
+ spin_lock(&j->lock);
+ ret = journal_entry_want_write(j);
+ spin_unlock(&j->lock);
+
+ return ret;
+}
+
+/*
+ * Should _only_ be called from journal_res_get() - when we actually want a
+ * journal reservation - a journal entry being open means the journal is dirty:
+ */
+static int journal_entry_open(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_buf *buf = j->buf +
+ ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
+ union journal_res_state old, new;
+ int u64s;
+ u64 v;
+
+ lockdep_assert_held(&j->lock);
+ BUG_ON(journal_entry_is_open(j));
+ BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
+
+ if (j->blocked)
+ return JOURNAL_ERR_blocked;
+
+ if (j->cur_entry_error)
+ return j->cur_entry_error;
+
+ if (bch2_journal_error(j))
+ return JOURNAL_ERR_insufficient_devices; /* -EROFS */
+
+ if (!fifo_free(&j->pin))
+ return JOURNAL_ERR_journal_pin_full;
+
+ if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
+ return JOURNAL_ERR_max_in_flight;
+
+ BUG_ON(!j->cur_entry_sectors);
+
+ buf->expires =
+ (journal_cur_seq(j) == j->flushed_seq_ondisk
+ ? jiffies
+ : j->last_flush_write) +
+ msecs_to_jiffies(c->opts.journal_flush_delay);
+
+ buf->u64s_reserved = j->entry_u64s_reserved;
+ buf->disk_sectors = j->cur_entry_sectors;
+ buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);
+
+ u64s = (int) (buf->sectors << 9) / sizeof(u64) -
+ journal_entry_overhead(j);
+ u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
+
+ if (u64s <= (ssize_t) j->early_journal_entries.nr)
+ return JOURNAL_ERR_journal_full;
+
+ if (fifo_empty(&j->pin) && j->reclaim_thread)
+ wake_up_process(j->reclaim_thread);
+
+ /*
+ * The fifo_push() needs to happen at the same time as j->seq is
+ * incremented for journal_last_seq() to be calculated correctly
+ */
+ atomic64_inc(&j->seq);
+ journal_pin_list_init(fifo_push_ref(&j->pin), 1);
+
+ BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
+
+ bkey_extent_init(&buf->key);
+ buf->noflush = false;
+ buf->must_flush = false;
+ buf->separate_flush = false;
+ buf->flush_time = 0;
+
+ memset(buf->data, 0, sizeof(*buf->data));
+ buf->data->seq = cpu_to_le64(journal_cur_seq(j));
+ buf->data->u64s = 0;
+
+ if (j->early_journal_entries.nr) {
+ memcpy(buf->data->_data, j->early_journal_entries.data,
+ j->early_journal_entries.nr * sizeof(u64));
+ le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
+ }
+
+ /*
+ * Must be set before marking the journal entry as open:
+ */
+ j->cur_entry_u64s = u64s;
+
+ v = atomic64_read(&j->reservations.counter);
+ do {
+ old.v = new.v = v;
+
+ BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
+
+ new.idx++;
+ BUG_ON(journal_state_count(new, new.idx));
+ BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
+
+ journal_state_inc(&new);
+
+ /* Handle any already added entries */
+ new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
+ } while ((v = atomic64_cmpxchg(&j->reservations.counter,
+ old.v, new.v)) != old.v);
+
+ if (j->res_get_blocked_start)
+ bch2_time_stats_update(j->blocked_time,
+ j->res_get_blocked_start);
+ j->res_get_blocked_start = 0;
+
+ mod_delayed_work(c->io_complete_wq,
+ &j->write_work,
+ msecs_to_jiffies(c->opts.journal_flush_delay));
+ journal_wake(j);
+
+ if (j->early_journal_entries.nr)
+ darray_exit(&j->early_journal_entries);
+ return 0;
+}
+
+static bool journal_quiesced(struct journal *j)
+{
+ bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
+
+ if (!ret)
+ journal_entry_close(j);
+ return ret;
+}
+
+static void journal_quiesce(struct journal *j)
+{
+ wait_event(j->wait, journal_quiesced(j));
+}
+
+static void journal_write_work(struct work_struct *work)
+{
+ struct journal *j = container_of(work, struct journal, write_work.work);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ long delta;
+
+ spin_lock(&j->lock);
+ if (!__journal_entry_is_open(j->reservations))
+ goto unlock;
+
+ delta = journal_cur_buf(j)->expires - jiffies;
+
+ if (delta > 0)
+ mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
+ else
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+unlock:
+ spin_unlock(&j->lock);
+}
+
+static int __journal_res_get(struct journal *j, struct journal_res *res,
+ unsigned flags)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_buf *buf;
+ bool can_discard;
+ int ret;
+retry:
+ if (journal_res_get_fast(j, res, flags))
+ return 0;
+
+ if (bch2_journal_error(j))
+ return -BCH_ERR_erofs_journal_err;
+
+ spin_lock(&j->lock);
+
+ /* check once more in case somebody else shut things down... */
+ if (bch2_journal_error(j)) {
+ spin_unlock(&j->lock);
+ return -BCH_ERR_erofs_journal_err;
+ }
+
+ /*
+ * Recheck after taking the lock, so we don't race with another thread
+ * that just did journal_entry_open() and call journal_entry_close()
+ * unnecessarily
+ */
+ if (journal_res_get_fast(j, res, flags)) {
+ spin_unlock(&j->lock);
+ return 0;
+ }
+
+ if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
+ /*
+ * Don't want to close current journal entry, just need to
+ * invoke reclaim:
+ */
+ ret = JOURNAL_ERR_journal_full;
+ goto unlock;
+ }
+
+ /*
+ * If we couldn't get a reservation because the current buf filled up,
+ * and we had room for a bigger entry on disk, signal that we want to
+ * realloc the journal bufs:
+ */
+ buf = journal_cur_buf(j);
+ if (journal_entry_is_open(j) &&
+ buf->buf_size >> 9 < buf->disk_sectors &&
+ buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
+ j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
+
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ ret = journal_entry_open(j);
+
+ if (ret == JOURNAL_ERR_max_in_flight)
+ trace_and_count(c, journal_entry_full, c);
+unlock:
+ if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
+ !j->res_get_blocked_start) {
+ j->res_get_blocked_start = local_clock() ?: 1;
+ trace_and_count(c, journal_full, c);
+ }
+
+ can_discard = j->can_discard;
+ spin_unlock(&j->lock);
+
+ if (!ret)
+ goto retry;
+ if (journal_error_check_stuck(j, ret, flags))
+ ret = -BCH_ERR_journal_res_get_blocked;
+
+ /*
+ * Journal is full - can't rely on reclaim from work item due to
+ * freezing:
+ */
+ if ((ret == JOURNAL_ERR_journal_full ||
+ ret == JOURNAL_ERR_journal_pin_full) &&
+ !(flags & JOURNAL_RES_GET_NONBLOCK)) {
+ if (can_discard) {
+ bch2_journal_do_discards(j);
+ goto retry;
+ }
+
+ if (mutex_trylock(&j->reclaim_lock)) {
+ bch2_journal_reclaim(j);
+ mutex_unlock(&j->reclaim_lock);
+ }
+ }
+
+ return ret == JOURNAL_ERR_insufficient_devices
+ ? -BCH_ERR_erofs_journal_err
+ : -BCH_ERR_journal_res_get_blocked;
+}
+
+/*
+ * Essentially the entry function to the journaling code. When bcachefs is doing
+ * a btree insert, it calls this function to get the current journal write.
+ * The journal write is the structure used to set up journal writes. The calling
+ * function will then add its keys to the structure, queuing them for the next
+ * write.
+ *
+ * To ensure forward progress, the current task must not be holding any
+ * btree node write locks.
+ */
+int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
+ unsigned flags)
+{
+ int ret;
+
+ closure_wait_event(&j->async_wait,
+ (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
+ (flags & JOURNAL_RES_GET_NONBLOCK));
+ return ret;
+}
+
+/* journal_preres: */
+
+static bool journal_preres_available(struct journal *j,
+ struct journal_preres *res,
+ unsigned new_u64s,
+ unsigned flags)
+{
+ bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);
+
+ if (!ret && mutex_trylock(&j->reclaim_lock)) {
+ bch2_journal_reclaim(j);
+ mutex_unlock(&j->reclaim_lock);
+ }
+
+ return ret;
+}
+
+int __bch2_journal_preres_get(struct journal *j,
+ struct journal_preres *res,
+ unsigned new_u64s,
+ unsigned flags)
+{
+ int ret;
+
+ closure_wait_event(&j->preres_wait,
+ (ret = bch2_journal_error(j)) ||
+ journal_preres_available(j, res, new_u64s, flags));
+ return ret;
+}
+
+/* journal_entry_res: */
+
+void bch2_journal_entry_res_resize(struct journal *j,
+ struct journal_entry_res *res,
+ unsigned new_u64s)
+{
+ union journal_res_state state;
+ int d = new_u64s - res->u64s;
+
+ spin_lock(&j->lock);
+
+ j->entry_u64s_reserved += d;
+ if (d <= 0)
+ goto out;
+
+ j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
+ smp_mb();
+ state = READ_ONCE(j->reservations);
+
+ if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
+ state.cur_entry_offset > j->cur_entry_u64s) {
+ j->cur_entry_u64s += d;
+ /*
+ * Not enough room in current journal entry, have to flush it:
+ */
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+ } else {
+ journal_cur_buf(j)->u64s_reserved += d;
+ }
+out:
+ spin_unlock(&j->lock);
+ res->u64s += d;
+}
+
+/* journal flushing: */
+
+/**
+ * bch2_journal_flush_seq_async() - wait for a journal entry to be written
+ * @j: journal object
+ * @seq: seq to flush
+ * @parent: closure object to wait with
+ * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed,
+ * -EIO if @seq will never be flushed
+ *
+ * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
+ * necessary
+ */
+int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
+ struct closure *parent)
+{
+ struct journal_buf *buf;
+ int ret = 0;
+
+ if (seq <= j->flushed_seq_ondisk)
+ return 1;
+
+ spin_lock(&j->lock);
+
+ if (WARN_ONCE(seq > journal_cur_seq(j),
+ "requested to flush journal seq %llu, but currently at %llu",
+ seq, journal_cur_seq(j)))
+ goto out;
+
+ /* Recheck under lock: */
+ if (j->err_seq && seq >= j->err_seq) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (seq <= j->flushed_seq_ondisk) {
+ ret = 1;
+ goto out;
+ }
+
+ /* if seq was written, but not flushed - flush a newer one instead */
+ seq = max(seq, journal_last_unwritten_seq(j));
+
+recheck_need_open:
+ if (seq > journal_cur_seq(j)) {
+ struct journal_res res = { 0 };
+
+ if (journal_entry_is_open(j))
+ __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+
+ spin_unlock(&j->lock);
+
+ ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
+ if (ret)
+ return ret;
+
+ seq = res.seq;
+ buf = j->buf + (seq & JOURNAL_BUF_MASK);
+ buf->must_flush = true;
+
+ if (!buf->flush_time) {
+ buf->flush_time = local_clock() ?: 1;
+ buf->expires = jiffies;
+ }
+
+ if (parent && !closure_wait(&buf->wait, parent))
+ BUG();
+
+ bch2_journal_res_put(j, &res);
+
+ spin_lock(&j->lock);
+ goto want_write;
+ }
+
+ /*
+ * if write was kicked off without a flush, flush the next sequence
+ * number instead
+ */
+ buf = journal_seq_to_buf(j, seq);
+ if (buf->noflush) {
+ seq++;
+ goto recheck_need_open;
+ }
+
+ buf->must_flush = true;
+
+ if (parent && !closure_wait(&buf->wait, parent))
+ BUG();
+want_write:
+ if (seq == journal_cur_seq(j))
+ journal_entry_want_write(j);
+out:
+ spin_unlock(&j->lock);
+ return ret;
+}
+
+int bch2_journal_flush_seq(struct journal *j, u64 seq)
+{
+ u64 start_time = local_clock();
+ int ret, ret2;
+
+ /*
+ * Don't update time_stats when @seq is already flushed:
+ */
+ if (seq <= j->flushed_seq_ondisk)
+ return 0;
+
+ ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
+
+ if (!ret)
+ bch2_time_stats_update(j->flush_seq_time, start_time);
+
+ return ret ?: ret2 < 0 ? ret2 : 0;
+}
+
+/*
+ * bch2_journal_flush_async - if there is an open journal entry, or a journal
+ * entry still being written, write it and wait for the write to complete
+ */
+void bch2_journal_flush_async(struct journal *j, struct closure *parent)
+{
+ bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
+}
+
+int bch2_journal_flush(struct journal *j)
+{
+ return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
+}
+
+/*
+ * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
+ * @seq
+ */
+bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ u64 unwritten_seq;
+ bool ret = false;
+
+ if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
+ return false;
+
+ if (seq <= c->journal.flushed_seq_ondisk)
+ return false;
+
+ spin_lock(&j->lock);
+ if (seq <= c->journal.flushed_seq_ondisk)
+ goto out;
+
+ for (unwritten_seq = journal_last_unwritten_seq(j);
+ unwritten_seq < seq;
+ unwritten_seq++) {
+ struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
+
+ /* journal write is already in flight, and was a flush write: */
+ if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
+ goto out;
+
+ buf->noflush = true;
+ }
+
+ ret = true;
+out:
+ spin_unlock(&j->lock);
+ return ret;
+}
+
+int bch2_journal_meta(struct journal *j)
+{
+ struct journal_buf *buf;
+ struct journal_res res;
+ int ret;
+
+ memset(&res, 0, sizeof(res));
+
+ ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
+ if (ret)
+ return ret;
+
+ buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
+ buf->must_flush = true;
+
+ if (!buf->flush_time) {
+ buf->flush_time = local_clock() ?: 1;
+ buf->expires = jiffies;
+ }
+
+ bch2_journal_res_put(j, &res);
+
+ return bch2_journal_flush_seq(j, res.seq);
+}
+
+/* block/unblock the journal: */
+
+void bch2_journal_unblock(struct journal *j)
+{
+ spin_lock(&j->lock);
+ j->blocked--;
+ spin_unlock(&j->lock);
+
+ journal_wake(j);
+}
+
+void bch2_journal_block(struct journal *j)
+{
+ spin_lock(&j->lock);
+ j->blocked++;
+ spin_unlock(&j->lock);
+
+ journal_quiesce(j);
+}
+
+/* allocate journal on a device: */
+
+static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
+ bool new_fs, struct closure *cl)
+{
+ struct bch_fs *c = ca->fs;
+ struct journal_device *ja = &ca->journal;
+ u64 *new_bucket_seq = NULL, *new_buckets = NULL;
+ struct open_bucket **ob = NULL;
+ long *bu = NULL;
+ unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
+ int ret = 0;
+
+ BUG_ON(nr <= ja->nr);
+
+ bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
+ ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
+ new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
+ new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
+ if (!bu || !ob || !new_buckets || !new_bucket_seq) {
+ ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+ goto err_free;
+ }
+
+ for (nr_got = 0; nr_got < nr_want; nr_got++) {
+ if (new_fs) {
+ bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
+ if (bu[nr_got] < 0) {
+ ret = -BCH_ERR_ENOSPC_bucket_alloc;
+ break;
+ }
+ } else {
+ ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
+ ret = PTR_ERR_OR_ZERO(ob[nr_got]);
+ if (ret)
+ break;
+
+ ret = bch2_trans_run(c,
+ bch2_trans_mark_metadata_bucket(trans, ca,
+ ob[nr_got]->bucket, BCH_DATA_journal,
+ ca->mi.bucket_size));
+ if (ret) {
+ bch2_open_bucket_put(c, ob[nr_got]);
+ bch_err_msg(c, ret, "marking new journal buckets");
+ break;
+ }
+
+ bu[nr_got] = ob[nr_got]->bucket;
+ }
+ }
+
+ if (!nr_got)
+ goto err_free;
+
+ /* Don't return an error if we successfully allocated some buckets: */
+ ret = 0;
+
+ if (c) {
+ bch2_journal_flush_all_pins(&c->journal);
+ bch2_journal_block(&c->journal);
+ mutex_lock(&c->sb_lock);
+ }
+
+ memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
+ memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
+
+ BUG_ON(ja->discard_idx > ja->nr);
+
+ pos = ja->discard_idx ?: ja->nr;
+
+ memmove(new_buckets + pos + nr_got,
+ new_buckets + pos,
+ sizeof(new_buckets[0]) * (ja->nr - pos));
+ memmove(new_bucket_seq + pos + nr_got,
+ new_bucket_seq + pos,
+ sizeof(new_bucket_seq[0]) * (ja->nr - pos));
+
+ for (i = 0; i < nr_got; i++) {
+ new_buckets[pos + i] = bu[i];
+ new_bucket_seq[pos + i] = 0;
+ }
+
+ nr = ja->nr + nr_got;
+
+ ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
+ if (ret)
+ goto err_unblock;
+
+ if (!new_fs)
+ bch2_write_super(c);
+
+ /* Commit: */
+ if (c)
+ spin_lock(&c->journal.lock);
+
+ swap(new_buckets, ja->buckets);
+ swap(new_bucket_seq, ja->bucket_seq);
+ ja->nr = nr;
+
+ if (pos <= ja->discard_idx)
+ ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
+ if (pos <= ja->dirty_idx_ondisk)
+ ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
+ if (pos <= ja->dirty_idx)
+ ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
+ if (pos <= ja->cur_idx)
+ ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
+
+ if (c)
+ spin_unlock(&c->journal.lock);
+err_unblock:
+ if (c) {
+ bch2_journal_unblock(&c->journal);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (ret && !new_fs)
+ for (i = 0; i < nr_got; i++)
+ bch2_trans_run(c,
+ bch2_trans_mark_metadata_bucket(trans, ca,
+ bu[i], BCH_DATA_free, 0));
+err_free:
+ if (!new_fs)
+ for (i = 0; i < nr_got; i++)
+ bch2_open_bucket_put(c, ob[i]);
+
+ kfree(new_bucket_seq);
+ kfree(new_buckets);
+ kfree(ob);
+ kfree(bu);
+ return ret;
+}
+
+/*
+ * Allocate more journal space at runtime - not currently making use of it, but
+ * the code works:
+ */
+int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
+ unsigned nr)
+{
+ struct journal_device *ja = &ca->journal;
+ struct closure cl;
+ int ret = 0;
+
+ closure_init_stack(&cl);
+
+ down_write(&c->state_lock);
+
+ /* don't handle reducing nr of buckets yet: */
+ if (nr < ja->nr)
+ goto unlock;
+
+ while (ja->nr < nr) {
+ struct disk_reservation disk_res = { 0, 0, 0 };
+
+ /*
+ * note: journal buckets aren't really counted as _sectors_ used yet, so
+ * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
+ * when space used goes up without a reservation - but we do need the
+ * reservation to ensure we'll actually be able to allocate:
+ *
+ * XXX: that's not right, disk reservations only ensure a
+ * filesystem-wide allocation will succeed, this is a device
+ * specific allocation - we can hang here:
+ */
+
+ ret = bch2_disk_reservation_get(c, &disk_res,
+ bucket_to_sector(ca, nr - ja->nr), 1, 0);
+ if (ret)
+ break;
+
+ ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
+
+ bch2_disk_reservation_put(c, &disk_res);
+
+ closure_sync(&cl);
+
+ if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
+ break;
+ }
+
+ if (ret)
+ bch_err_fn(c, ret);
+unlock:
+ up_write(&c->state_lock);
+ return ret;
+}
+
+int bch2_dev_journal_alloc(struct bch_dev *ca)
+{
+ unsigned nr;
+ int ret;
+
+ if (dynamic_fault("bcachefs:add:journal_alloc")) {
+ ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+ goto err;
+ }
+
+ /* 1/128th of the device by default: */
+ nr = ca->mi.nbuckets >> 7;
+
+ /*
+ * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
+ * is smaller:
+ */
+ nr = clamp_t(unsigned, nr,
+ BCH_JOURNAL_BUCKETS_MIN,
+ min(1 << 13,
+ (1 << 24) / ca->mi.bucket_size));
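+ /*
+ * (e.g. with 1MB buckets - 2048 sectors - (1 << 24) / 2048 = 8192, so both
+ * limits coincide; with 4MB buckets the sector limit wins and we cap at
+ * 2048 buckets, which is still 8GB.)
+ */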
+
+ ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+err:
+ if (ret)
+ bch_err_fn(ca, ret);
+ return ret;
+}
+
+/* startup/shutdown: */
+
+static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
+{
+ bool ret = false;
+ u64 seq;
+
+ spin_lock(&j->lock);
+ for (seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j) && !ret;
+ seq++) {
+ struct journal_buf *buf = journal_seq_to_buf(j, seq);
+
+ if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
+ ret = true;
+ }
+ spin_unlock(&j->lock);
+
+ return ret;
+}
+
+void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
+{
+ wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
+}
+
+void bch2_fs_journal_stop(struct journal *j)
+{
+ bch2_journal_reclaim_stop(j);
+ bch2_journal_flush_all_pins(j);
+
+ wait_event(j->wait, journal_entry_close(j));
+
+ /*
+ * Always write a new journal entry, to make sure the clock hands are up
+ * to date (and match the superblock)
+ */
+ bch2_journal_meta(j);
+
+ journal_quiesce(j);
+
+ BUG_ON(!bch2_journal_error(j) &&
+ test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
+ j->last_empty_seq != journal_cur_seq(j));
+
+ cancel_delayed_work_sync(&j->write_work);
+}
+
+int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_entry_pin_list *p;
+ struct journal_replay *i, **_i;
+ struct genradix_iter iter;
+ bool had_entries = false;
+ unsigned ptr;
+ u64 last_seq = cur_seq, nr, seq;
+
+ genradix_for_each_reverse(&c->journal_entries, iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ last_seq = le64_to_cpu(i->j.last_seq);
+ break;
+ }
+
+ nr = cur_seq - last_seq;
+
+ if (nr + 1 > j->pin.size) {
+ free_fifo(&j->pin);
+ init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
+ if (!j->pin.data) {
+ bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
+ return -BCH_ERR_ENOMEM_journal_pin_fifo;
+ }
+ }
+
+ j->replay_journal_seq = last_seq;
+ j->replay_journal_seq_end = cur_seq;
+ j->last_seq_ondisk = last_seq;
+ j->flushed_seq_ondisk = cur_seq - 1;
+ j->seq_ondisk = cur_seq - 1;
+ j->pin.front = last_seq;
+ j->pin.back = cur_seq;
+ atomic64_set(&j->seq, cur_seq - 1);
+
+ fifo_for_each_entry_ptr(p, &j->pin, seq)
+ journal_pin_list_init(p, 1);
+
+ genradix_for_each(&c->journal_entries, iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ seq = le64_to_cpu(i->j.seq);
+ BUG_ON(seq >= cur_seq);
+
+ if (seq < last_seq)
+ continue;
+
+ if (journal_entry_empty(&i->j))
+ j->last_empty_seq = le64_to_cpu(i->j.seq);
+
+ p = journal_seq_pin(j, seq);
+
+ p->devs.nr = 0;
+ for (ptr = 0; ptr < i->nr_ptrs; ptr++)
+ bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
+
+ had_entries = true;
+ }
+
+ if (!had_entries)
+ j->last_empty_seq = cur_seq;
+
+ spin_lock(&j->lock);
+
+ set_bit(JOURNAL_STARTED, &j->flags);
+ j->last_flush_write = jiffies;
+
+ j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
+ j->reservations.unwritten_idx++;
+
+ c->last_bucket_seq_cleanup = journal_cur_seq(j);
+
+ bch2_journal_space_available(j);
+ spin_unlock(&j->lock);
+
+ return bch2_journal_reclaim_start(j);
+}
+
+/* init/exit: */
+
+void bch2_dev_journal_exit(struct bch_dev *ca)
+{
+ kfree(ca->journal.bio);
+ kfree(ca->journal.buckets);
+ kfree(ca->journal.bucket_seq);
+
+ ca->journal.bio = NULL;
+ ca->journal.buckets = NULL;
+ ca->journal.bucket_seq = NULL;
+}
+
+int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
+{
+ struct journal_device *ja = &ca->journal;
+ struct bch_sb_field_journal *journal_buckets =
+ bch2_sb_field_get(sb, journal);
+ struct bch_sb_field_journal_v2 *journal_buckets_v2 =
+ bch2_sb_field_get(sb, journal_v2);
+ unsigned i, nr_bvecs;
+
+ ja->nr = 0;
+
+ if (journal_buckets_v2) {
+ unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
+
+ for (i = 0; i < nr; i++)
+ ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
+ } else if (journal_buckets) {
+ ja->nr = bch2_nr_journal_buckets(journal_buckets);
+ }
+
+ ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
+ if (!ja->bucket_seq)
+ return -BCH_ERR_ENOMEM_dev_journal_init;
+
+ nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
+
+ ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
+ if (!ca->journal.bio)
+ return -BCH_ERR_ENOMEM_dev_journal_init;
+
+ bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
+
+ ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
+ if (!ja->buckets)
+ return -BCH_ERR_ENOMEM_dev_journal_init;
+
+ if (journal_buckets_v2) {
+ unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
+ unsigned j, dst = 0;
+
+ for (i = 0; i < nr; i++)
+ for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
+ ja->buckets[dst++] =
+ le64_to_cpu(journal_buckets_v2->d[i].start) + j;
+ } else if (journal_buckets) {
+ for (i = 0; i < ja->nr; i++)
+ ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
+ }
+
+ return 0;
+}
+
+void bch2_fs_journal_exit(struct journal *j)
+{
+ unsigned i;
+
+ darray_exit(&j->early_journal_entries);
+
+ for (i = 0; i < ARRAY_SIZE(j->buf); i++)
+ kvpfree(j->buf[i].data, j->buf[i].buf_size);
+ free_fifo(&j->pin);
+}
+
+int bch2_fs_journal_init(struct journal *j)
+{
+ static struct lock_class_key res_key;
+ unsigned i;
+
+ spin_lock_init(&j->lock);
+ spin_lock_init(&j->err_lock);
+ init_waitqueue_head(&j->wait);
+ INIT_DELAYED_WORK(&j->write_work, journal_write_work);
+ init_waitqueue_head(&j->reclaim_wait);
+ init_waitqueue_head(&j->pin_flush_wait);
+ mutex_init(&j->reclaim_lock);
+ mutex_init(&j->discard_lock);
+
+ lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
+
+ atomic64_set(&j->reservations.counter,
+ ((union journal_res_state)
+ { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
+
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
+ return -BCH_ERR_ENOMEM_journal_pin_fifo;
+
+ for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
+ j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
+ j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
+ if (!j->buf[i].data)
+ return -BCH_ERR_ENOMEM_journal_buf;
+ }
+
+ j->pin.front = j->pin.back = 1;
+ return 0;
+}
+
+/* debug: */
+
+void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ union journal_res_state s;
+ struct bch_dev *ca;
+ unsigned long now = jiffies;
+ u64 seq;
+ unsigned i;
+
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 24);
+ out->atomic++;
+
+ rcu_read_lock();
+ s = READ_ONCE(j->reservations);
+
+ prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
+ prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
+ prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
+ prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
+ prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
+ prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
+ prt_printf(out, "prereserved:\t\t%u/%u\n", j->prereserved.reserved, j->prereserved.remaining);
+ prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
+ prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
+ prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
+ prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
+ prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
+ prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
+ prt_printf(out, "reclaim kicked:\t\t%u\n", j->reclaim_kicked);
+ prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
+ ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
+ prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
+ prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
+ prt_printf(out, "current entry:\t\t");
+
+ switch (s.cur_entry_offset) {
+ case JOURNAL_ENTRY_ERROR_VAL:
+ prt_printf(out, "error");
+ break;
+ case JOURNAL_ENTRY_CLOSED_VAL:
+ prt_printf(out, "closed");
+ break;
+ default:
+ prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
+ break;
+ }
+
+ prt_newline(out);
+
+ for (seq = journal_cur_seq(j);
+ seq >= journal_last_unwritten_seq(j);
+ --seq) {
+ i = seq & JOURNAL_BUF_MASK;
+
+ prt_printf(out, "unwritten entry:");
+ prt_tab(out);
+ prt_printf(out, "%llu", seq);
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "refcount:");
+ prt_tab(out);
+ prt_printf(out, "%u", journal_state_count(s, i));
+ prt_newline(out);
+
+ prt_printf(out, "sectors:");
+ prt_tab(out);
+ prt_printf(out, "%u", j->buf[i].sectors);
+ prt_newline(out);
+
+ prt_printf(out, "expires:");
+ prt_tab(out);
+ prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
+ prt_newline(out);
+
+ printbuf_indent_sub(out, 2);
+ }
+
+ prt_printf(out,
+ "replay done:\t\t%i\n",
+ test_bit(JOURNAL_REPLAY_DONE, &j->flags));
+
+ prt_printf(out, "space:\n");
+ prt_printf(out, "\tdiscarded\t%u:%u\n",
+ j->space[journal_space_discarded].next_entry,
+ j->space[journal_space_discarded].total);
+ prt_printf(out, "\tclean ondisk\t%u:%u\n",
+ j->space[journal_space_clean_ondisk].next_entry,
+ j->space[journal_space_clean_ondisk].total);
+ prt_printf(out, "\tclean\t\t%u:%u\n",
+ j->space[journal_space_clean].next_entry,
+ j->space[journal_space_clean].total);
+ prt_printf(out, "\ttotal\t\t%u:%u\n",
+ j->space[journal_space_total].next_entry,
+ j->space[journal_space_total].total);
+
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ struct journal_device *ja = &ca->journal;
+
+ if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
+ continue;
+
+ if (!ja->nr)
+ continue;
+
+ prt_printf(out, "dev %u:\n", i);
+ prt_printf(out, "\tnr\t\t%u\n", ja->nr);
+ prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
+ prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
+ prt_printf(out, "\tdiscard_idx\t%u\n", ja->discard_idx);
+ prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
+ prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
+ prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
+ }
+
+ rcu_read_unlock();
+
+ --out->atomic;
+}
+
+void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
+{
+ spin_lock(&j->lock);
+ __bch2_journal_debug_to_text(out, j);
+ spin_unlock(&j->lock);
+}
+
+bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
+{
+ struct journal_entry_pin_list *pin_list;
+ struct journal_entry_pin *pin;
+ unsigned i;
+
+ spin_lock(&j->lock);
+ *seq = max(*seq, j->pin.front);
+
+ if (*seq >= j->pin.back) {
+ spin_unlock(&j->lock);
+ return true;
+ }
+
+ out->atomic++;
+
+ pin_list = journal_seq_pin(j, *seq);
+
+ prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
+ list_for_each_entry(pin, &pin_list->list[i], list) {
+ prt_printf(out, "\t%px %ps", pin, pin->flush);
+ prt_newline(out);
+ }
+
+ if (!list_empty(&pin_list->flushed)) {
+ prt_printf(out, "flushed:");
+ prt_newline(out);
+ }
+
+ list_for_each_entry(pin, &pin_list->flushed, list) {
+ prt_printf(out, "\t%px %ps", pin, pin->flush);
+ prt_newline(out);
+ }
+
+ printbuf_indent_sub(out, 2);
+
+ --out->atomic;
+ spin_unlock(&j->lock);
+
+ return false;
+}
+
+void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
+{
+ u64 seq = 0;
+
+ while (!bch2_journal_seq_pins_to_text(out, j, &seq))
+ seq++;
+}
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
new file mode 100644
index 000000000000..491133cc52f3
--- /dev/null
+++ b/fs/bcachefs/journal.h
@@ -0,0 +1,548 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_H
+#define _BCACHEFS_JOURNAL_H
+
+/*
+ * THE JOURNAL:
+ *
+ * The primary purpose of the journal is to log updates (insertions) to the
+ * b-tree, to avoid having to do synchronous updates to the b-tree on disk.
+ *
+ * Even without the journal, the b-tree is always internally consistent on
+ * disk - and in fact, in the earliest incarnations bcache didn't have a journal
+ * but did handle unclean shutdowns by doing all index updates synchronously
+ * (with coalescing).
+ *
+ * Updates to interior nodes still happen synchronously and without the journal
+ * (for simplicity) - this may change eventually but updates to interior nodes
+ * are rare enough it's not a huge priority.
+ *
+ * This means the journal is relatively separate from the b-tree; it consists of
+ * just a list of keys and journal replay consists of just redoing those
+ * insertions in the same order that they appear in the journal.
+ *
+ * PERSISTENCE:
+ *
+ * For synchronous updates (where we're waiting on the index update to hit
+ * disk), the journal entry will be written out immediately (or as soon as
+ * possible, if the write for the previous journal entry was still in flight).
+ *
+ * Synchronous updates are specified by passing a closure (@flush_cl) to
+ * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter
+ * down to the journalling code. That closure will wait on the journal write to
+ * complete (via closure_wait()).
+ *
+ * If the index update wasn't synchronous, the journal entry will be
+ * written out after 10 ms have elapsed, by default (the delay_ms field
+ * in struct journal).
+ *
+ * JOURNAL ENTRIES:
+ *
+ * A journal entry is variable size (struct jset), it's got a fixed length
+ * header and then a variable number of struct jset_entry entries.
+ *
+ * Journal entries are identified by monotonically increasing 64 bit sequence
+ * numbers - jset->seq; other places in the code refer to this sequence number.
+ *
+ * A jset_entry entry contains one or more bkeys (which is what gets inserted
+ * into the b-tree). We need a container to indicate which b-tree the key is
+ * for; also, the roots of the various b-trees are stored in jset_entry entries
+ * (one for each b-tree) - this lets us add new b-tree types without changing
+ * the on disk format.
+ *
+ * We also keep some things in the journal header that are logically part of the
+ * superblock - all the things that are frequently updated. This is for future
+ * bcache on raw flash support; the superblock (which will become another
+ * journal) can't be moved or wear leveled, so it contains just enough
+ * information to find the main journal, and the superblock only has to be
+ * rewritten when we want to move/wear level the main journal.
+ *
+ * JOURNAL LAYOUT ON DISK:
+ *
+ * The journal is written to a ringbuffer of buckets (which is kept in the
+ * superblock); the individual buckets are not necessarily contiguous on disk
+ * which means that journal entries are not allowed to span buckets, but also
+ * that we can resize the journal at runtime if desired (unimplemented).
+ *
+ * The journal buckets exist in the same pool as all the other buckets that are
+ * managed by the allocator and garbage collection - garbage collection marks
+ * the journal buckets as metadata buckets.
+ *
+ * OPEN/DIRTY JOURNAL ENTRIES:
+ *
+ * Open/dirty journal entries are journal entries that contain b-tree updates
+ * that have not yet been written out to the b-tree on disk. We have to track
+ * which journal entries are dirty, and we also have to avoid wrapping around
+ * the journal and overwriting old but still dirty journal entries with new
+ * journal entries.
+ *
+ * On disk, this is represented with the "last_seq" field of struct jset;
+ * last_seq is the first sequence number that journal replay has to replay.
+ *
+ * To avoid overwriting dirty journal entries on disk, we keep a mapping (in
+ * journal_device->seq) of, for each journal bucket, the highest sequence
+ * number of any journal entry it contains. Then, by comparing that against
+ * last_seq we can determine whether that journal bucket contains dirty
+ * journal entries or not.
+ *
+ * To track which journal entries are dirty, we maintain a fifo of refcounts
+ * (where each entry corresponds to a specific sequence number) - when a ref
+ * goes to 0, that journal entry is no longer dirty.
+ *
+ * Journalling of index updates is done at the same time as the b-tree itself is
+ * being modified (see btree_insert_key()); when we add the key to the journal
+ * the pending b-tree write takes a ref on the journal entry the key was added
+ * to. If a pending b-tree write would need to take refs on multiple dirty
+ * journal entries, it only keeps the ref on the oldest one (since a newer
+ * journal entry will still be replayed if an older entry was dirty).
+ *
+ * JOURNAL FILLING UP:
+ *
+ * There are two ways the journal could fill up; either we could run out of
+ * space to write to, or we could have too many open journal entries and run out
+ * of room in the fifo of refcounts. Since those refcounts are decremented
+ * without any locking we can't safely resize that fifo, so we handle it the
+ * same way.
+ *
+ * If the journal fills up, we start flushing dirty btree nodes until we can
+ * allocate space for a journal write again - preferentially flushing btree
+ * nodes that are pinning the oldest journal entries first.
+ */
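+/*
+ * A worked example of the last_seq machinery described above (sequence
+ * numbers are illustrative): if entries 10..15 are dirty, last_seq is 10 and
+ * replay after a crash starts there; once the last ref on entry 10 is
+ * dropped, last_seq advances to 11, and the bucket holding entry 10 can be
+ * reused once a newer journal entry recording last_seq = 11 has hit disk.
+ */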
+
+#include <linux/hash.h>
+
+#include "journal_types.h"
+
+struct bch_fs;
+
+static inline void journal_wake(struct journal *j)
+{
+ wake_up(&j->wait);
+ closure_wake_up(&j->async_wait);
+ closure_wake_up(&j->preres_wait);
+}
+
+static inline struct journal_buf *journal_cur_buf(struct journal *j)
+{
+ return j->buf + j->reservations.idx;
+}
+
+/* Sequence number of oldest dirty journal entry */
+
+static inline u64 journal_last_seq(struct journal *j)
+{
+ return j->pin.front;
+}
+
+static inline u64 journal_cur_seq(struct journal *j)
+{
+ EBUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
+
+ return j->pin.back - 1;
+}
+
+static inline u64 journal_last_unwritten_seq(struct journal *j)
+{
+ return j->seq_ondisk + 1;
+}
+
+static inline int journal_state_count(union journal_res_state s, int idx)
+{
+ switch (idx) {
+ case 0: return s.buf0_count;
+ case 1: return s.buf1_count;
+ case 2: return s.buf2_count;
+ case 3: return s.buf3_count;
+ }
+ BUG();
+}
+
+static inline void journal_state_inc(union journal_res_state *s)
+{
+ s->buf0_count += s->idx == 0;
+ s->buf1_count += s->idx == 1;
+ s->buf2_count += s->idx == 2;
+ s->buf3_count += s->idx == 3;
+}
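+/*
+ * (These helpers rely on union journal_res_state packing the current buffer
+ * index, the four per-buffer refcounts and cur_entry_offset into a single
+ * 64-bit word, so reservations can be taken and released with one atomic64
+ * cmpxchg - see journal_res_get_fast() below; the bitfield layout itself
+ * lives in journal_types.h.)
+ */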
+
+/*
+ * Amount of space that will be taken up by some keys in the journal (i.e.
+ * including the jset_entry header)
+ */
+static inline unsigned jset_u64s(unsigned u64s)
+{
+ return u64s + sizeof(struct jset_entry) / sizeof(u64);
+}
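+/*
+ * e.g. bch2_journal_res_get(j, &res, jset_u64s(0), 0) - as bch2_journal_meta()
+ * does in journal.c - reserves room for just an (empty) entry header.
+ */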
+
+static inline int journal_entry_overhead(struct journal *j)
+{
+ return sizeof(struct jset) / sizeof(u64) + j->entry_u64s_reserved;
+}
+
+static inline struct jset_entry *
+bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
+{
+ struct jset *jset = buf->data;
+ struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s));
+
+ memset(entry, 0, sizeof(*entry));
+ entry->u64s = cpu_to_le16(u64s);
+
+ le32_add_cpu(&jset->u64s, jset_u64s(u64s));
+
+ return entry;
+}
+
+static inline struct jset_entry *
+journal_res_entry(struct journal *j, struct journal_res *res)
+{
+ return vstruct_idx(j->buf[res->idx].data, res->offset);
+}
+
+static inline unsigned journal_entry_init(struct jset_entry *entry, unsigned type,
+ enum btree_id id, unsigned level,
+ unsigned u64s)
+{
+ entry->u64s = cpu_to_le16(u64s);
+ entry->btree_id = id;
+ entry->level = level;
+ entry->type = type;
+ entry->pad[0] = 0;
+ entry->pad[1] = 0;
+ entry->pad[2] = 0;
+ return jset_u64s(u64s);
+}
+
+static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+ enum btree_id id, unsigned level,
+ const void *data, unsigned u64s)
+{
+ unsigned ret = journal_entry_init(entry, type, id, level, u64s);
+
+ memcpy_u64s_small(entry->_data, data, u64s);
+ return ret;
+}
+
+static inline struct jset_entry *
+bch2_journal_add_entry(struct journal *j, struct journal_res *res,
+ unsigned type, enum btree_id id,
+ unsigned level, unsigned u64s)
+{
+ struct jset_entry *entry = journal_res_entry(j, res);
+ unsigned actual = journal_entry_init(entry, type, id, level, u64s);
+
+ EBUG_ON(!res->ref);
+ EBUG_ON(actual > res->u64s);
+
+ res->offset += actual;
+ res->u64s -= actual;
+ return entry;
+}
+
+static inline bool journal_entry_empty(struct jset *j)
+{
+ struct jset_entry *i;
+
+ if (j->seq != j->last_seq)
+ return false;
+
+ vstruct_for_each(j, i)
+ if (i->type == BCH_JSET_ENTRY_btree_keys && i->u64s)
+ return false;
+ return true;
+}
+
+/*
+ * Drop a reference on a buffer index and return the resulting reservation
+ * state, so the caller can check whether the count hit zero.
+ */
+static inline union journal_res_state journal_state_buf_put(struct journal *j, unsigned idx)
+{
+ union journal_res_state s;
+
+ s.v = atomic64_sub_return(((union journal_res_state) {
+ .buf0_count = idx == 0,
+ .buf1_count = idx == 1,
+ .buf2_count = idx == 2,
+ .buf3_count = idx == 3,
+ }).v, &j->reservations.counter);
+ return s;
+}
+
+void bch2_journal_buf_put_final(struct journal *, u64, bool);
+
+static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
+{
+ union journal_res_state s;
+
+ s = journal_state_buf_put(j, idx);
+ if (!journal_state_count(s, idx))
+ bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx);
+}
+
+static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq)
+{
+ union journal_res_state s;
+
+ s = journal_state_buf_put(j, idx);
+ if (!journal_state_count(s, idx)) {
+ spin_lock(&j->lock);
+ bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx);
+ spin_unlock(&j->lock);
+ }
+}
+
+/*
+ * Release a journal reservation: pad out any unused space with empty
+ * btree_keys entries, then drop our ref on the journal buffer so other
+ * threads can proceed to add their keys as well.
+ */
+static inline void bch2_journal_res_put(struct journal *j,
+ struct journal_res *res)
+{
+ if (!res->ref)
+ return;
+
+ lock_release(&j->res_map, _THIS_IP_);
+
+ while (res->u64s)
+ bch2_journal_add_entry(j, res,
+ BCH_JSET_ENTRY_btree_keys,
+ 0, 0, 0);
+
+ bch2_journal_buf_put(j, res->idx, res->seq);
+
+ res->ref = 0;
+}
+
+int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
+ unsigned);
+
+/* First bits for BCH_WATERMARK: */
+enum journal_res_flags {
+ __JOURNAL_RES_GET_NONBLOCK = BCH_WATERMARK_BITS,
+ __JOURNAL_RES_GET_CHECK,
+};
+
+#define JOURNAL_RES_GET_NONBLOCK (1 << __JOURNAL_RES_GET_NONBLOCK)
+#define JOURNAL_RES_GET_CHECK (1 << __JOURNAL_RES_GET_CHECK)
+
+static inline int journal_res_get_fast(struct journal *j,
+ struct journal_res *res,
+ unsigned flags)
+{
+ union journal_res_state old, new;
+ u64 v = atomic64_read(&j->reservations.counter);
+
+ do {
+ old.v = new.v = v;
+
+ /*
+ * Check if there is still room in the current journal
+ * entry:
+ */
+ if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s)
+ return 0;
+
+ EBUG_ON(!journal_state_count(new, new.idx));
+
+ if ((flags & BCH_WATERMARK_MASK) < j->watermark)
+ return 0;
+
+ new.cur_entry_offset += res->u64s;
+ journal_state_inc(&new);
+
+ /*
+ * If the refcount would overflow, we have to wait:
+ * XXX - tracepoint this:
+ */
+ if (!journal_state_count(new, new.idx))
+ return 0;
+
+ if (flags & JOURNAL_RES_GET_CHECK)
+ return 1;
+ } while ((v = atomic64_cmpxchg(&j->reservations.counter,
+ old.v, new.v)) != old.v);
+
+ res->ref = true;
+ res->idx = old.idx;
+ res->offset = old.cur_entry_offset;
+ res->seq = le64_to_cpu(j->buf[old.idx].data->seq);
+ return 1;
+}
+
+static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
+ unsigned u64s, unsigned flags)
+{
+ int ret;
+
+ EBUG_ON(res->ref);
+ EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
+
+ res->u64s = u64s;
+
+ if (journal_res_get_fast(j, res, flags))
+ goto out;
+
+ ret = bch2_journal_res_get_slowpath(j, res, flags);
+ if (ret)
+ return ret;
+out:
+ if (!(flags & JOURNAL_RES_GET_CHECK)) {
+ lock_acquire_shared(&j->res_map, 0,
+ (flags & JOURNAL_RES_GET_NONBLOCK) != 0,
+ NULL, _THIS_IP_);
+ EBUG_ON(!res->ref);
+ }
+ return 0;
+}
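+
+/*
+ * A minimal sketch of the reservation lifecycle, assuming the caller has a
+ * key @k destined for @btree_id at @level (error handling elided):
+ *
+ *	struct journal_res res = {};
+ *	struct jset_entry *entry;
+ *
+ *	ret = bch2_journal_res_get(j, &res, jset_u64s(k->k.u64s), 0);
+ *	if (ret)
+ *		return ret;
+ *
+ *	entry = bch2_journal_add_entry(j, &res, BCH_JSET_ENTRY_btree_keys,
+ *				       btree_id, level, k->k.u64s);
+ *	memcpy_u64s_small(entry->_data, k, k->k.u64s);
+ *	bch2_journal_res_put(j, &res);
+ */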
+
+/* journal_preres: */
+
+static inline void journal_set_watermark(struct journal *j)
+{
+ union journal_preres_state s = READ_ONCE(j->prereserved);
+ unsigned watermark = BCH_WATERMARK_stripe;
+
+ if (fifo_free(&j->pin) < j->pin.size / 4)
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc);
+ if (fifo_free(&j->pin) < j->pin.size / 8)
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim);
+
+ if (s.reserved > s.remaining)
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc);
+ if (!s.remaining)
+ watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim);
+
+ if (watermark == j->watermark)
+ return;
+
+ swap(watermark, j->watermark);
+ if (watermark > j->watermark)
+ journal_wake(j);
+}
+
+static inline void bch2_journal_preres_put(struct journal *j,
+ struct journal_preres *res)
+{
+ union journal_preres_state s = { .reserved = res->u64s };
+
+ if (!res->u64s)
+ return;
+
+ s.v = atomic64_sub_return(s.v, &j->prereserved.counter);
+ res->u64s = 0;
+
+ if (unlikely(s.waiting)) {
+ clear_bit(ilog2((((union journal_preres_state) { .waiting = 1 }).v)),
+ (unsigned long *) &j->prereserved.v);
+ closure_wake_up(&j->preres_wait);
+ }
+
+ if (s.reserved <= s.remaining && j->watermark)
+ journal_set_watermark(j);
+}
+
+int __bch2_journal_preres_get(struct journal *,
+ struct journal_preres *, unsigned, unsigned);
+
+static inline int bch2_journal_preres_get_fast(struct journal *j,
+ struct journal_preres *res,
+ unsigned new_u64s,
+ unsigned flags,
+ bool set_waiting)
+{
+ int d = new_u64s - res->u64s;
+ union journal_preres_state old, new;
+ u64 v = atomic64_read(&j->prereserved.counter);
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+ int ret;
+
+ do {
+ old.v = new.v = v;
+ ret = 0;
+
+ if (watermark == BCH_WATERMARK_reclaim ||
+ new.reserved + d < new.remaining) {
+ new.reserved += d;
+ ret = 1;
+ } else if (set_waiting && !new.waiting)
+ new.waiting = true;
+ else
+ return 0;
+ } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
+ old.v, new.v)) != old.v);
+
+ if (ret)
+ res->u64s += d;
+ return ret;
+}
+
+static inline int bch2_journal_preres_get(struct journal *j,
+ struct journal_preres *res,
+ unsigned new_u64s,
+ unsigned flags)
+{
+ if (new_u64s <= res->u64s)
+ return 0;
+
+ if (bch2_journal_preres_get_fast(j, res, new_u64s, flags, false))
+ return 0;
+
+ if (flags & JOURNAL_RES_GET_NONBLOCK)
+ return -BCH_ERR_journal_preres_get_blocked;
+
+ return __bch2_journal_preres_get(j, res, new_u64s, flags);
+}
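+
+/*
+ * Sketch of a typical pre-reservation, taken before acquiring locks so that
+ * the real reservation later can't deadlock against journal reclaim:
+ *
+ *	struct journal_preres preres = {};
+ *
+ *	ret = bch2_journal_preres_get(j, &preres, u64s, 0);
+ *	if (ret)
+ *		return ret;
+ *	... take btree locks, then bch2_journal_res_get() ...
+ *	bch2_journal_preres_put(j, &preres);
+ */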
+
+/* journal_entry_res: */
+
+void bch2_journal_entry_res_resize(struct journal *,
+ struct journal_entry_res *,
+ unsigned);
+
+int bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
+void bch2_journal_flush_async(struct journal *, struct closure *);
+
+int bch2_journal_flush_seq(struct journal *, u64);
+int bch2_journal_flush(struct journal *);
+bool bch2_journal_noflush_seq(struct journal *, u64);
+int bch2_journal_meta(struct journal *);
+
+void bch2_journal_halt(struct journal *);
+
+static inline int bch2_journal_error(struct journal *j)
+{
+ return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
+ ? -EIO : 0;
+}
+
+struct bch_dev;
+
+static inline void bch2_journal_set_replay_done(struct journal *j)
+{
+ BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
+ set_bit(JOURNAL_REPLAY_DONE, &j->flags);
+}
+
+void bch2_journal_unblock(struct journal *);
+void bch2_journal_block(struct journal *);
+
+void __bch2_journal_debug_to_text(struct printbuf *, struct journal *);
+void bch2_journal_debug_to_text(struct printbuf *, struct journal *);
+void bch2_journal_pins_to_text(struct printbuf *, struct journal *);
+bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
+
+int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
+ unsigned nr);
+int bch2_dev_journal_alloc(struct bch_dev *);
+
+void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
+
+void bch2_fs_journal_stop(struct journal *);
+int bch2_fs_journal_start(struct journal *, u64);
+
+void bch2_dev_journal_exit(struct bch_dev *);
+int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
+void bch2_fs_journal_exit(struct journal *);
+int bch2_fs_journal_init(struct journal *);
+
+#endif /* _BCACHEFS_JOURNAL_H */
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
new file mode 100644
index 000000000000..6a3d6a374e9c
--- /dev/null
+++ b/fs/bcachefs/journal_io.c
@@ -0,0 +1,1894 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "btree_io.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "checksum.h"
+#include "disk_groups.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_io.h"
+#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
+#include "replicas.h"
+#include "sb-clean.h"
+#include "trace.h"
+
+static struct nonce journal_nonce(const struct jset *jset)
+{
+ return (struct nonce) {{
+ [0] = 0,
+ [1] = ((__le32 *) &jset->seq)[0],
+ [2] = ((__le32 *) &jset->seq)[1],
+ [3] = BCH_NONCE_JOURNAL,
+ }};
+}
+
+static bool jset_csum_good(struct bch_fs *c, struct jset *j)
+{
+ return bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)) &&
+ !bch2_crc_cmp(j->csum,
+ csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j));
+}
+
+static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
+{
+ return (seq - c->journal_entries_base_seq) & (~0U >> 1);
+}
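+
+/*
+ * Example: with journal_entries_base_seq == 1000, seq 1000 maps to radix slot
+ * 0, seq 1001 to slot 1, and so on; the ~0U >> 1 mask keeps indices within 31
+ * bits, since a genradix is indexed by ulong, which may be 32 bits.
+ */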
+
+static void __journal_replay_free(struct bch_fs *c,
+ struct journal_replay *i)
+{
+ struct journal_replay **p =
+ genradix_ptr(&c->journal_entries,
+ journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));
+
+ BUG_ON(*p != i);
+ *p = NULL;
+ kvpfree(i, offsetof(struct journal_replay, j) +
+ vstruct_bytes(&i->j));
+}
+
+static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
+{
+ i->ignore = true;
+
+ if (!c->opts.read_entire_journal)
+ __journal_replay_free(c, i);
+}
+
+struct journal_list {
+ struct closure cl;
+ u64 last_seq;
+ struct mutex lock;
+ int ret;
+};
+
+#define JOURNAL_ENTRY_ADD_OK 0
+#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
+
+/*
+ * Given a journal entry we just read, add it to the list of journal entries to
+ * be replayed:
+ */
+static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
+ struct journal_ptr entry_ptr,
+ struct journal_list *jlist, struct jset *j)
+{
+ struct genradix_iter iter;
+ struct journal_replay **_i, *i, *dup;
+ struct journal_ptr *ptr;
+ size_t bytes = vstruct_bytes(j);
+ u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
+ int ret = JOURNAL_ENTRY_ADD_OK;
+
+ /* Is this entry older than the range we need? */
+ if (!c->opts.read_entire_journal &&
+ le64_to_cpu(j->seq) < jlist->last_seq)
+ return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
+
+	/*
+	 * genradixes are indexed by a ulong, not a u64, so we can't index them
+	 * by sequence number directly: assume instead that they will all fall
+	 * within a range of +-2 billion of the first one we find.
+	 */
+ if (!c->journal_entries_base_seq)
+ c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
+
+ /* Drop entries we don't need anymore */
+ if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
+ genradix_for_each_from(&c->journal_entries, iter, _i,
+ journal_entry_radix_idx(c, jlist->last_seq)) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ if (le64_to_cpu(i->j.seq) >= last_seq)
+ break;
+ journal_replay_free(c, i);
+ }
+ }
+
+ jlist->last_seq = max(jlist->last_seq, last_seq);
+
+ _i = genradix_ptr_alloc(&c->journal_entries,
+ journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
+ GFP_KERNEL);
+ if (!_i)
+ return -BCH_ERR_ENOMEM_journal_entry_add;
+
+ /*
+ * Duplicate journal entries? If so we want the one that didn't have a
+ * checksum error:
+ */
+ dup = *_i;
+ if (dup) {
+ if (bytes == vstruct_bytes(&dup->j) &&
+ !memcmp(j, &dup->j, bytes)) {
+ i = dup;
+ goto found;
+ }
+
+ if (!entry_ptr.csum_good) {
+ i = dup;
+ goto found;
+ }
+
+ if (!dup->csum_good)
+ goto replace;
+
+		fsck_err(c, "found duplicate but non-identical journal entries (seq %llu)",
+ le64_to_cpu(j->seq));
+ i = dup;
+ goto found;
+ }
+replace:
+ i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
+ if (!i)
+ return -BCH_ERR_ENOMEM_journal_entry_add;
+
+ i->nr_ptrs = 0;
+ i->csum_good = entry_ptr.csum_good;
+ i->ignore = false;
+ unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
+ i->ptrs[i->nr_ptrs++] = entry_ptr;
+
+ if (dup) {
+ if (dup->nr_ptrs >= ARRAY_SIZE(dup->ptrs)) {
+ bch_err(c, "found too many copies of journal entry %llu",
+ le64_to_cpu(i->j.seq));
+ dup->nr_ptrs = ARRAY_SIZE(dup->ptrs) - 1;
+ }
+
+ /* The first ptr should represent the jset we kept: */
+ memcpy(i->ptrs + i->nr_ptrs,
+ dup->ptrs,
+ sizeof(dup->ptrs[0]) * dup->nr_ptrs);
+ i->nr_ptrs += dup->nr_ptrs;
+ __journal_replay_free(c, dup);
+ }
+
+ *_i = i;
+ return 0;
+found:
+ for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
+ if (ptr->dev == ca->dev_idx) {
+ bch_err(c, "duplicate journal entry %llu on same device",
+ le64_to_cpu(i->j.seq));
+ goto out;
+ }
+ }
+
+ if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
+ bch_err(c, "found too many copies of journal entry %llu",
+ le64_to_cpu(i->j.seq));
+ goto out;
+ }
+
+ i->ptrs[i->nr_ptrs++] = entry_ptr;
+out:
+fsck_err:
+ return ret;
+}
+
+/* this fills in a range with empty jset_entries: */
+static void journal_entry_null_range(void *start, void *end)
+{
+ struct jset_entry *entry;
+
+ for (entry = start; entry != end; entry = vstruct_next(entry))
+ memset(entry, 0, sizeof(*entry));
+}
+
+#define JOURNAL_ENTRY_REREAD 5
+#define JOURNAL_ENTRY_NONE 6
+#define JOURNAL_ENTRY_BAD 7
+
+static void journal_entry_err_msg(struct printbuf *out,
+ u32 version,
+ struct jset *jset,
+ struct jset_entry *entry)
+{
+ prt_str(out, "invalid journal entry, version=");
+ bch2_version_to_text(out, version);
+
+ if (entry) {
+ prt_str(out, " type=");
+ prt_str(out, bch2_jset_entry_types[entry->type]);
+ }
+
+	if (!jset) {
+		prt_printf(out, " in superblock");
+	} else {
+		prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));
+
+ if (entry)
+ prt_printf(out, " offset=%zi/%u",
+ (u64 *) entry - jset->_data,
+ le32_to_cpu(jset->u64s));
+ }
+
+ prt_str(out, ": ");
+}
+
+#define journal_entry_err(c, version, jset, entry, msg, ...) \
+({ \
+ struct printbuf _buf = PRINTBUF; \
+ \
+ journal_entry_err_msg(&_buf, version, jset, entry); \
+ prt_printf(&_buf, msg, ##__VA_ARGS__); \
+ \
+ switch (flags & BKEY_INVALID_WRITE) { \
+ case READ: \
+ mustfix_fsck_err(c, "%s", _buf.buf); \
+ break; \
+ case WRITE: \
+ bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
+ if (bch2_fs_inconsistent(c)) { \
+ ret = -BCH_ERR_fsck_errors_not_fixed; \
+ goto fsck_err; \
+ } \
+ break; \
+ } \
+ \
+ printbuf_exit(&_buf); \
+ true; \
+})
+
+#define journal_entry_err_on(cond, c, version, jset, entry, msg, ...) \
+ ((cond) ? journal_entry_err(c, version, jset, entry, msg, ##__VA_ARGS__) : false)
+
+#define FSCK_DELETED_KEY 5
+
+static int journal_validate_key(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned level, enum btree_id btree_id,
+ struct bkey_i *k,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ int write = flags & BKEY_INVALID_WRITE;
+ void *next = vstruct_next(entry);
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ if (journal_entry_err_on(!k->k.u64s, c, version, jset, entry, "k->u64s 0")) {
+ entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
+ journal_entry_null_range(vstruct_next(entry), next);
+ return FSCK_DELETED_KEY;
+ }
+
+ if (journal_entry_err_on((void *) bkey_next(k) >
+ (void *) vstruct_next(entry),
+ c, version, jset, entry,
+ "extends past end of journal entry")) {
+ entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
+ journal_entry_null_range(vstruct_next(entry), next);
+ return FSCK_DELETED_KEY;
+ }
+
+ if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
+ c, version, jset, entry,
+ "bad format %u", k->k.format)) {
+ le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
+ memmove(k, bkey_next(k), next - (void *) bkey_next(k));
+ journal_entry_null_range(vstruct_next(entry), next);
+ return FSCK_DELETED_KEY;
+ }
+
+ if (!write)
+ bch2_bkey_compat(level, btree_id, version, big_endian,
+ write, NULL, bkey_to_packed(k));
+
+ if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
+ __btree_node_type(level, btree_id), write, &buf)) {
+ printbuf_reset(&buf);
+ journal_entry_err_msg(&buf, version, jset, entry);
+ prt_newline(&buf);
+ printbuf_indent_add(&buf, 2);
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+ prt_newline(&buf);
+ bch2_bkey_invalid(c, bkey_i_to_s_c(k),
+ __btree_node_type(level, btree_id), write, &buf);
+
+ mustfix_fsck_err(c, "%s", buf.buf);
+
+ le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
+ memmove(k, bkey_next(k), next - (void *) bkey_next(k));
+ journal_entry_null_range(vstruct_next(entry), next);
+
+ printbuf_exit(&buf);
+ return FSCK_DELETED_KEY;
+ }
+
+ if (write)
+ bch2_bkey_compat(level, btree_id, version, big_endian,
+ write, NULL, bkey_to_packed(k));
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
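+
+/*
+ * Note the repair strategy above: bad keys are deleted rather than failing
+ * validation outright. E.g. if the middle key of a three key entry is bad,
+ * entry->u64s is shrunk, the following key is memmove()d down over it, and
+ * the freed tail is zeroed out with journal_entry_null_range().
+ */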
+
+static int journal_entry_btree_keys_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct bkey_i *k = entry->start;
+
+ while (k != vstruct_last(entry)) {
+ int ret = journal_validate_key(c, jset, entry,
+ entry->level,
+ entry->btree_id,
+ k, version, big_endian,
+ flags|BKEY_INVALID_JOURNAL);
+ if (ret == FSCK_DELETED_KEY)
+ continue;
+
+ k = bkey_next(k);
+ }
+
+ return 0;
+}
+
+static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct bkey_i *k;
+ bool first = true;
+
+ jset_entry_for_each_key(entry, k) {
+ if (!first) {
+ prt_newline(out);
+ prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ }
+ prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
+ first = false;
+ }
+}
+
+static int journal_entry_btree_root_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct bkey_i *k = entry->start;
+ int ret = 0;
+
+ if (journal_entry_err_on(!entry->u64s ||
+ le16_to_cpu(entry->u64s) != k->k.u64s,
+ c, version, jset, entry,
+ "invalid btree root journal entry: wrong number of keys")) {
+ void *next = vstruct_next(entry);
+ /*
+ * we don't want to null out this jset_entry,
+ * just the contents, so that later we can tell
+ * we were _supposed_ to have a btree root
+ */
+ entry->u64s = 0;
+ journal_entry_null_range(vstruct_next(entry), next);
+ return 0;
+ }
+
+ return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
+ version, big_endian, flags);
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ journal_entry_btree_keys_to_text(out, c, entry);
+}
+
+static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ /* obsolete, don't care: */
+ return 0;
+}
+
+static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+}
+
+static int journal_entry_blacklist_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ int ret = 0;
+
+ if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
+ c, version, jset, entry,
+ "invalid journal seq blacklist entry: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ }
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_blacklist *bl =
+ container_of(entry, struct jset_entry_blacklist, entry);
+
+ prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
+}
+
+static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct jset_entry_blacklist_v2 *bl_entry;
+ int ret = 0;
+
+ if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
+ c, version, jset, entry,
+ "invalid journal seq blacklist entry: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ goto out;
+ }
+
+ bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
+
+ if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
+ le64_to_cpu(bl_entry->end),
+ c, version, jset, entry,
+ "invalid journal seq blacklist entry: start > end")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ }
+out:
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_blacklist_v2 *bl =
+ container_of(entry, struct jset_entry_blacklist_v2, entry);
+
+ prt_printf(out, "start=%llu end=%llu",
+ le64_to_cpu(bl->start),
+ le64_to_cpu(bl->end));
+}
+
+static int journal_entry_usage_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes < sizeof(*u),
+ c, version, jset, entry,
+ "invalid journal entry usage: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+
+ prt_printf(out, "type=%s v=%llu",
+ bch2_fs_usage_types[u->entry.btree_id],
+ le64_to_cpu(u->v));
+}
+
+static int journal_entry_data_usage_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes < sizeof(*u) ||
+ bytes < sizeof(*u) + u->r.nr_devs,
+ c, version, jset, entry,
+ "invalid journal entry usage: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+
+ bch2_replicas_entry_to_text(out, &u->r);
+ prt_printf(out, "=%llu", le64_to_cpu(u->v));
+}
+
+static int journal_entry_clock_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct jset_entry_clock *clock =
+ container_of(entry, struct jset_entry_clock, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes != sizeof(*clock),
+ c, version, jset, entry, "bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+ if (journal_entry_err_on(clock->rw > 1,
+ c, version, jset, entry, "bad rw")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_clock *clock =
+ container_of(entry, struct jset_entry_clock, entry);
+
+ prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
+}
+
+static int journal_entry_dev_usage_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ struct jset_entry_dev_usage *u =
+ container_of(entry, struct jset_entry_dev_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ unsigned expected = sizeof(*u);
+ unsigned dev;
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes < expected,
+ c, version, jset, entry, "bad size (%u < %u)",
+ bytes, expected)) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+ dev = le32_to_cpu(u->dev);
+
+ if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
+ c, version, jset, entry, "bad dev")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+ if (journal_entry_err_on(u->pad,
+ c, version, jset, entry, "bad pad")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_dev_usage *u =
+ container_of(entry, struct jset_entry_dev_usage, entry);
+ unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
+
+ prt_printf(out, "dev=%u", le32_to_cpu(u->dev));
+
+ for (i = 0; i < nr_types; i++) {
+ if (i < BCH_DATA_NR)
+ prt_printf(out, " %s", bch2_data_types[i]);
+ else
+ prt_printf(out, " (unknown data type %u)", i);
+ prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
+ le64_to_cpu(u->d[i].buckets),
+ le64_to_cpu(u->d[i].sectors),
+ le64_to_cpu(u->d[i].fragmented));
+ }
+
+ prt_printf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
+}
+
+static int journal_entry_log_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ return 0;
+}
+
+static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
+ unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);
+
+	prt_printf(out, "%.*s", (int) bytes, l->d);
+}
+
+static int journal_entry_overwrite_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ return journal_entry_btree_keys_validate(c, jset, entry,
+ version, big_endian, READ);
+}
+
+static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ journal_entry_btree_keys_to_text(out, c, entry);
+}
+
+struct jset_entry_ops {
+ int (*validate)(struct bch_fs *, struct jset *,
+ struct jset_entry *, unsigned, int,
+ enum bkey_invalid_flags);
+ void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
+};
+
+static const struct jset_entry_ops bch2_jset_entry_ops[] = {
+#define x(f, nr) \
+ [BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \
+ .validate = journal_entry_##f##_validate, \
+ .to_text = journal_entry_##f##_to_text, \
+ },
+ BCH_JSET_ENTRY_TYPES()
+#undef x
+};
+
+int bch2_journal_entry_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ enum bkey_invalid_flags flags)
+{
+ return entry->type < BCH_JSET_ENTRY_NR
+ ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
+ version, big_endian, flags)
+ : 0;
+}
+
+void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ if (entry->type < BCH_JSET_ENTRY_NR) {
+ prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
+ } else {
+ prt_printf(out, "(unknown type %u)", entry->type);
+ }
+}
+
+static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
+ enum bkey_invalid_flags flags)
+{
+ struct jset_entry *entry;
+ unsigned version = le32_to_cpu(jset->version);
+ int ret = 0;
+
+ vstruct_for_each(jset, entry) {
+ if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
+ c, version, jset, entry,
+ "journal entry extends past end of jset")) {
+ jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
+ break;
+ }
+
+ ret = bch2_journal_entry_validate(c, jset, entry,
+ version, JSET_BIG_ENDIAN(jset), flags);
+ if (ret)
+ break;
+ }
+fsck_err:
+ return ret;
+}
+
+static int jset_validate(struct bch_fs *c,
+ struct bch_dev *ca,
+ struct jset *jset, u64 sector,
+ enum bkey_invalid_flags flags)
+{
+ unsigned version;
+ int ret = 0;
+
+ if (le64_to_cpu(jset->magic) != jset_magic(c))
+ return JOURNAL_ENTRY_NONE;
+
+ version = le32_to_cpu(jset->version);
+ if (journal_entry_err_on(!bch2_version_compatible(version),
+ c, version, jset, NULL,
+ "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq),
+ BCH_VERSION_MAJOR(version),
+ BCH_VERSION_MINOR(version))) {
+ /* don't try to continue: */
+ return -EINVAL;
+ }
+
+ if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
+ c, version, jset, NULL,
+ "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq),
+ JSET_CSUM_TYPE(jset)))
+ ret = JOURNAL_ENTRY_BAD;
+
+ /* last_seq is ignored when JSET_NO_FLUSH is true */
+ if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
+ le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
+ c, version, jset, NULL,
+ "invalid journal entry: last_seq > seq (%llu > %llu)",
+ le64_to_cpu(jset->last_seq),
+ le64_to_cpu(jset->seq))) {
+ jset->last_seq = jset->seq;
+ return JOURNAL_ENTRY_BAD;
+ }
+
+ ret = jset_validate_entries(c, jset, flags);
+fsck_err:
+ return ret;
+}
+
+static int jset_validate_early(struct bch_fs *c,
+ struct bch_dev *ca,
+ struct jset *jset, u64 sector,
+ unsigned bucket_sectors_left,
+ unsigned sectors_read)
+{
+ size_t bytes = vstruct_bytes(jset);
+ unsigned version;
+ enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
+ int ret = 0;
+
+ if (le64_to_cpu(jset->magic) != jset_magic(c))
+ return JOURNAL_ENTRY_NONE;
+
+ version = le32_to_cpu(jset->version);
+ if (journal_entry_err_on(!bch2_version_compatible(version),
+ c, version, jset, NULL,
+ "%s sector %llu seq %llu: unknown journal entry version %u.%u",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq),
+ BCH_VERSION_MAJOR(version),
+ BCH_VERSION_MINOR(version))) {
+ /* don't try to continue: */
+ return -EINVAL;
+ }
+
+ if (bytes > (sectors_read << 9) &&
+ sectors_read < bucket_sectors_left)
+ return JOURNAL_ENTRY_REREAD;
+
+ if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
+ c, version, jset, NULL,
+ "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq), bytes))
+ le32_add_cpu(&jset->u64s,
+ -((bytes - (bucket_sectors_left << 9)) / 8));
+fsck_err:
+ return ret;
+}
+
+struct journal_read_buf {
+ void *data;
+ size_t size;
+};
+
+static int journal_read_buf_realloc(struct journal_read_buf *b,
+ size_t new_size)
+{
+ void *n;
+
+ /* the bios are sized for this many pages, max: */
+ if (new_size > JOURNAL_ENTRY_SIZE_MAX)
+ return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
+
+ new_size = roundup_pow_of_two(new_size);
+ n = kvpmalloc(new_size, GFP_KERNEL);
+ if (!n)
+ return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
+
+ kvpfree(b->data, b->size);
+ b->data = n;
+ b->size = new_size;
+ return 0;
+}
+
+static int journal_read_bucket(struct bch_dev *ca,
+ struct journal_read_buf *buf,
+ struct journal_list *jlist,
+ unsigned bucket)
+{
+ struct bch_fs *c = ca->fs;
+ struct journal_device *ja = &ca->journal;
+ struct jset *j = NULL;
+ unsigned sectors, sectors_read = 0;
+ u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
+ end = offset + ca->mi.bucket_size;
+ bool saw_bad = false, csum_good;
+ int ret = 0;
+
+ pr_debug("reading %u", bucket);
+
+ while (offset < end) {
+ if (!sectors_read) {
+ struct bio *bio;
+ unsigned nr_bvecs;
+reread:
+ sectors_read = min_t(unsigned,
+ end - offset, buf->size >> 9);
+ nr_bvecs = buf_pages(buf->data, sectors_read << 9);
+
+ bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
+ bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
+
+ bio->bi_iter.bi_sector = offset;
+ bch2_bio_map(bio, buf->data, sectors_read << 9);
+
+ ret = submit_bio_wait(bio);
+ kfree(bio);
+
+ if (bch2_dev_io_err_on(ret, ca,
+ "journal read error: sector %llu",
+ offset) ||
+ bch2_meta_read_fault("journal")) {
+ /*
+ * We don't error out of the recovery process
+ * here, since the relevant journal entry may be
+ * found on a different device, and missing or
+ * no journal entries will be handled later
+ */
+ return 0;
+ }
+
+ j = buf->data;
+ }
+
+ ret = jset_validate_early(c, ca, j, offset,
+ end - offset, sectors_read);
+ switch (ret) {
+ case 0:
+ sectors = vstruct_sectors(j, c->block_bits);
+ break;
+ case JOURNAL_ENTRY_REREAD:
+ if (vstruct_bytes(j) > buf->size) {
+ ret = journal_read_buf_realloc(buf,
+ vstruct_bytes(j));
+ if (ret)
+ return ret;
+ }
+ goto reread;
+ case JOURNAL_ENTRY_NONE:
+ if (!saw_bad)
+ return 0;
+ /*
+ * On checksum error we don't really trust the size
+ * field of the journal entry we read, so try reading
+			 * again at the next block boundary:
+ */
+ sectors = block_sectors(c);
+ goto next_block;
+ default:
+ return ret;
+ }
+
+ /*
+ * This happens sometimes if we don't have discards on -
+ * when we've partially overwritten a bucket with new
+ * journal entries. We don't need the rest of the
+ * bucket:
+ */
+ if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
+ return 0;
+
+ ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
+
+ csum_good = jset_csum_good(c, j);
+ if (!csum_good)
+ saw_bad = true;
+
+ ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
+ j->encrypted_start,
+ vstruct_end(j) - (void *) j->encrypted_start);
+ bch2_fs_fatal_err_on(ret, c,
+ "error decrypting journal entry: %i", ret);
+
+ mutex_lock(&jlist->lock);
+ ret = journal_entry_add(c, ca, (struct journal_ptr) {
+ .csum_good = csum_good,
+ .dev = ca->dev_idx,
+ .bucket = bucket,
+ .bucket_offset = offset -
+ bucket_to_sector(ca, ja->buckets[bucket]),
+ .sector = offset,
+ }, jlist, j);
+ mutex_unlock(&jlist->lock);
+
+ switch (ret) {
+ case JOURNAL_ENTRY_ADD_OK:
+ break;
+ case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
+ break;
+ default:
+ return ret;
+ }
+next_block:
+ pr_debug("next");
+ offset += sectors;
+ sectors_read -= sectors;
+ j = ((void *) j) + (sectors << 9);
+ }
+
+ return 0;
+}
+
+static void bch2_journal_read_device(struct closure *cl)
+{
+ struct journal_device *ja =
+ container_of(cl, struct journal_device, read);
+ struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
+ struct bch_fs *c = ca->fs;
+ struct journal_list *jlist =
+ container_of(cl->parent, struct journal_list, cl);
+ struct journal_replay *r, **_r;
+ struct genradix_iter iter;
+ struct journal_read_buf buf = { NULL, 0 };
+ unsigned i;
+ int ret = 0;
+
+ if (!ja->nr)
+ goto out;
+
+ ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
+ if (ret)
+ goto err;
+
+ pr_debug("%u journal buckets", ja->nr);
+
+ for (i = 0; i < ja->nr; i++) {
+ ret = journal_read_bucket(ca, &buf, jlist, i);
+ if (ret)
+ goto err;
+ }
+
+ ja->sectors_free = ca->mi.bucket_size;
+
+ mutex_lock(&jlist->lock);
+ genradix_for_each_reverse(&c->journal_entries, iter, _r) {
+ r = *_r;
+
+ if (!r)
+ continue;
+
+ for (i = 0; i < r->nr_ptrs; i++) {
+ if (r->ptrs[i].dev == ca->dev_idx) {
+ unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
+ vstruct_sectors(&r->j, c->block_bits);
+
+ ja->cur_idx = r->ptrs[i].bucket;
+ ja->sectors_free = ca->mi.bucket_size - wrote;
+ goto found;
+ }
+ }
+ }
+found:
+ mutex_unlock(&jlist->lock);
+
+ if (ja->bucket_seq[ja->cur_idx] &&
+ ja->sectors_free == ca->mi.bucket_size) {
+ bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
+ bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
+ for (i = 0; i < 3; i++) {
+ unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
+
+ bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
+ }
+ ja->sectors_free = 0;
+ }
+
+ /*
+ * Set dirty_idx to indicate the entire journal is full and needs to be
+ * reclaimed - journal reclaim will immediately reclaim whatever isn't
+ * pinned when it first runs:
+ */
+ ja->discard_idx = ja->dirty_idx_ondisk =
+ ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
+out:
+ bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
+ kvpfree(buf.data, buf.size);
+ percpu_ref_put(&ca->io_ref);
+ closure_return(cl);
+ return;
+err:
+ mutex_lock(&jlist->lock);
+ jlist->ret = ret;
+ mutex_unlock(&jlist->lock);
+ goto out;
+}
+
+void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct journal_replay *j)
+{
+ unsigned i;
+
+ for (i = 0; i < j->nr_ptrs; i++) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
+ u64 offset;
+
+ div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);
+
+ if (i)
+ prt_printf(out, " ");
+ prt_printf(out, "%u:%u:%u (sector %llu)",
+ j->ptrs[i].dev,
+ j->ptrs[i].bucket,
+ j->ptrs[i].bucket_offset,
+ j->ptrs[i].sector);
+ }
+}
+
+int bch2_journal_read(struct bch_fs *c,
+ u64 *last_seq,
+ u64 *blacklist_seq,
+ u64 *start_seq)
+{
+ struct journal_list jlist;
+ struct journal_replay *i, **_i, *prev = NULL;
+ struct genradix_iter radix_iter;
+ struct bch_dev *ca;
+ unsigned iter;
+ struct printbuf buf = PRINTBUF;
+ bool degraded = false, last_write_torn = false;
+ u64 seq;
+ int ret = 0;
+
+ closure_init_stack(&jlist.cl);
+ mutex_init(&jlist.lock);
+ jlist.last_seq = 0;
+ jlist.ret = 0;
+
+ for_each_member_device(ca, c, iter) {
+ if (!c->opts.fsck &&
+ !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
+ continue;
+
+ if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
+ ca->mi.state == BCH_MEMBER_STATE_ro) &&
+ percpu_ref_tryget(&ca->io_ref))
+ closure_call(&ca->journal.read,
+ bch2_journal_read_device,
+ system_unbound_wq,
+ &jlist.cl);
+ else
+ degraded = true;
+ }
+
+ closure_sync(&jlist.cl);
+
+ if (jlist.ret)
+ return jlist.ret;
+
+ *last_seq = 0;
+ *start_seq = 0;
+ *blacklist_seq = 0;
+
+	/*
+	 * Find the most recent flush entry, and ignore newer non-flush
+	 * entries - those entries will be blacklisted:
+	 */
+ genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
+ enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
+
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ if (!*start_seq)
+ *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;
+
+ if (JSET_NO_FLUSH(&i->j)) {
+ i->ignore = true;
+ continue;
+ }
+
+ if (!last_write_torn && !i->csum_good) {
+ last_write_torn = true;
+ i->ignore = true;
+ continue;
+ }
+
+ if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
+ c, le32_to_cpu(i->j.version), &i->j, NULL,
+ "invalid journal entry: last_seq > seq (%llu > %llu)",
+ le64_to_cpu(i->j.last_seq),
+ le64_to_cpu(i->j.seq)))
+ i->j.last_seq = i->j.seq;
+
+ *last_seq = le64_to_cpu(i->j.last_seq);
+ *blacklist_seq = le64_to_cpu(i->j.seq) + 1;
+ break;
+ }
+
+ if (!*start_seq) {
+ bch_info(c, "journal read done, but no entries found");
+ return 0;
+ }
+
+ if (!*last_seq) {
+ fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
+ return 0;
+ }
+
+ bch_info(c, "journal read done, replaying entries %llu-%llu",
+ *last_seq, *blacklist_seq - 1);
+
+ if (*start_seq != *blacklist_seq)
+ bch_info(c, "dropped unflushed entries %llu-%llu",
+ *blacklist_seq, *start_seq - 1);
+
+ /* Drop blacklisted entries and entries older than last_seq: */
+ genradix_for_each(&c->journal_entries, radix_iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ seq = le64_to_cpu(i->j.seq);
+ if (seq < *last_seq) {
+ journal_replay_free(c, i);
+ continue;
+ }
+
+ if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
+ fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
+ "found blacklisted journal entry %llu", seq);
+ i->ignore = true;
+ }
+ }
+
+ /* Check for missing entries: */
+ seq = *last_seq;
+ genradix_for_each(&c->journal_entries, radix_iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ BUG_ON(seq > le64_to_cpu(i->j.seq));
+
+ while (seq < le64_to_cpu(i->j.seq)) {
+ u64 missing_start, missing_end;
+ struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+
+ while (seq < le64_to_cpu(i->j.seq) &&
+ bch2_journal_seq_is_blacklisted(c, seq, false))
+ seq++;
+
+ if (seq == le64_to_cpu(i->j.seq))
+ break;
+
+ missing_start = seq;
+
+ while (seq < le64_to_cpu(i->j.seq) &&
+ !bch2_journal_seq_is_blacklisted(c, seq, false))
+ seq++;
+
+ if (prev) {
+ bch2_journal_ptrs_to_text(&buf1, c, prev);
+ prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
+ } else
+ prt_printf(&buf1, "(none)");
+ bch2_journal_ptrs_to_text(&buf2, c, i);
+
+ missing_end = seq - 1;
+ fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
+ " prev at %s\n"
+ " next at %s",
+ missing_start, missing_end,
+ *last_seq, *blacklist_seq - 1,
+ buf1.buf, buf2.buf);
+
+ printbuf_exit(&buf1);
+ printbuf_exit(&buf2);
+ }
+
+ prev = i;
+ seq++;
+ }
+
+ genradix_for_each(&c->journal_entries, radix_iter, _i) {
+ struct bch_replicas_padded replicas = {
+ .e.data_type = BCH_DATA_journal,
+ .e.nr_required = 1,
+ };
+ unsigned ptr;
+
+ i = *_i;
+ if (!i || i->ignore)
+ continue;
+
+ for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
+ ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
+
+ if (!i->ptrs[ptr].csum_good)
+ bch_err_dev_offset(ca, i->ptrs[ptr].sector,
+ "invalid journal checksum, seq %llu%s",
+ le64_to_cpu(i->j.seq),
+ i->csum_good ? " (had good copy on another device)" : "");
+ }
+
+ ret = jset_validate(c,
+ bch_dev_bkey_exists(c, i->ptrs[0].dev),
+ &i->j,
+ i->ptrs[0].sector,
+ READ);
+ if (ret)
+ goto err;
+
+ for (ptr = 0; ptr < i->nr_ptrs; ptr++)
+ replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;
+
+ bch2_replicas_entry_sort(&replicas.e);
+
+ printbuf_reset(&buf);
+ bch2_replicas_entry_to_text(&buf, &replicas.e);
+
+ if (!degraded &&
+ !bch2_replicas_marked(c, &replicas.e) &&
+ (le64_to_cpu(i->j.seq) == *last_seq ||
+ fsck_err(c, "superblock not marked as containing replicas for journal entry %llu\n %s",
+ le64_to_cpu(i->j.seq), buf.buf))) {
+ ret = bch2_mark_replicas(c, &replicas.e);
+ if (ret)
+ goto err;
+ }
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+/* journal write: */
+
+static void __journal_write_alloc(struct journal *j,
+ struct journal_buf *w,
+ struct dev_alloc_list *devs_sorted,
+ unsigned sectors,
+ unsigned *replicas,
+ unsigned replicas_want)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_device *ja;
+ struct bch_dev *ca;
+ unsigned i;
+
+ if (*replicas >= replicas_want)
+ return;
+
+ for (i = 0; i < devs_sorted->nr; i++) {
+ ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
+ if (!ca)
+ continue;
+
+ ja = &ca->journal;
+
+ /*
+ * Check that we can use this device, and aren't already using
+ * it:
+ */
+ if (!ca->mi.durability ||
+ ca->mi.state != BCH_MEMBER_STATE_rw ||
+ !ja->nr ||
+ bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
+ sectors > ja->sectors_free)
+ continue;
+
+ bch2_dev_stripe_increment(ca, &j->wp.stripe);
+
+ bch2_bkey_append_ptr(&w->key,
+ (struct bch_extent_ptr) {
+ .offset = bucket_to_sector(ca,
+ ja->buckets[ja->cur_idx]) +
+ ca->mi.bucket_size -
+ ja->sectors_free,
+ .dev = ca->dev_idx,
+ });
+
+ ja->sectors_free -= sectors;
+ ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
+
+ *replicas += ca->mi.durability;
+
+ if (*replicas >= replicas_want)
+ break;
+ }
+}
+
+/**
+ * journal_write_alloc - decide where to write next journal entry
+ *
+ * @j: journal object
+ * @w: journal buf (entry to be written)
+ *
+ * Returns: 0 on success, or -EROFS on failure
+ */
+static int journal_write_alloc(struct journal *j, struct journal_buf *w)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_devs_mask devs;
+ struct journal_device *ja;
+ struct bch_dev *ca;
+ struct dev_alloc_list devs_sorted;
+ unsigned sectors = vstruct_sectors(w->data, c->block_bits);
+ unsigned target = c->opts.metadata_target ?:
+ c->opts.foreground_target;
+ unsigned i, replicas = 0, replicas_want =
+ READ_ONCE(c->opts.metadata_replicas);
+
+ rcu_read_lock();
+retry:
+ devs = target_rw_devs(c, BCH_DATA_journal, target);
+
+ devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
+
+ __journal_write_alloc(j, w, &devs_sorted,
+ sectors, &replicas, replicas_want);
+
+ if (replicas >= replicas_want)
+ goto done;
+
+ for (i = 0; i < devs_sorted.nr; i++) {
+ ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
+ if (!ca)
+ continue;
+
+ ja = &ca->journal;
+
+ if (sectors > ja->sectors_free &&
+ sectors <= ca->mi.bucket_size &&
+ bch2_journal_dev_buckets_available(j, ja,
+ journal_space_discarded)) {
+ ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
+ ja->sectors_free = ca->mi.bucket_size;
+
+ /*
+ * ja->bucket_seq[ja->cur_idx] must always have
+ * something sensible:
+ */
+ ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
+ }
+ }
+
+ __journal_write_alloc(j, w, &devs_sorted,
+ sectors, &replicas, replicas_want);
+
+ if (replicas < replicas_want && target) {
+ /* Retry from all devices: */
+ target = 0;
+ goto retry;
+ }
+done:
+ rcu_read_unlock();
+
+ BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
+
+ return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
+}
+
+static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
+{
+ /* we aren't holding j->lock: */
+ unsigned new_size = READ_ONCE(j->buf_size_want);
+ void *new_buf;
+
+ if (buf->buf_size >= new_size)
+ return;
+
+ new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
+ if (!new_buf)
+ return;
+
+ memcpy(new_buf, buf->data, buf->buf_size);
+
+ spin_lock(&j->lock);
+ swap(buf->data, new_buf);
+ swap(buf->buf_size, new_size);
+ spin_unlock(&j->lock);
+
+ kvpfree(new_buf, new_size);
+}
+
+static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
+{
+ return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
+}
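+
+/*
+ * Example: sequence numbers map onto the (four) journal buffers round robin
+ * via seq & JOURNAL_BUF_MASK, so e.g. seqs 12 and 16 share a buffer while
+ * seq 13 uses the next one.
+ */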
+
+static void journal_write_done(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ struct bch_replicas_padded replicas;
+ union journal_res_state old, new;
+ u64 v, seq;
+ int err = 0;
+
+ bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
+ ? j->flush_write_time
+ : j->noflush_write_time, j->write_start_time);
+
+ if (!w->devs_written.nr) {
+ bch_err(c, "unable to write journal to sufficient devices");
+ err = -EIO;
+ } else {
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
+ w->devs_written);
+ if (bch2_mark_replicas(c, &replicas.e))
+ err = -EIO;
+ }
+
+ if (err)
+ bch2_fatal_error(c);
+
+ spin_lock(&j->lock);
+ seq = le64_to_cpu(w->data->seq);
+
+ if (seq >= j->pin.front)
+ journal_seq_pin(j, seq)->devs = w->devs_written;
+
+ if (!err) {
+ if (!JSET_NO_FLUSH(w->data)) {
+ j->flushed_seq_ondisk = seq;
+ j->last_seq_ondisk = w->last_seq;
+
+ bch2_do_discards(c);
+ closure_wake_up(&c->freelist_wait);
+
+ bch2_reset_alloc_cursors(c);
+ }
+ } else if (!j->err_seq || seq < j->err_seq)
+ j->err_seq = seq;
+
+ j->seq_ondisk = seq;
+
+ /*
+ * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
+ * more buckets:
+ *
+	 * Must come before signalling write completion, for
+	 * bch2_fs_journal_stop():
+ */
+ if (j->watermark != BCH_WATERMARK_stripe)
+ journal_reclaim_kick(&c->journal);
+
+ /* also must come before signalling write completion: */
+ closure_debug_destroy(cl);
+
+ v = atomic64_read(&j->reservations.counter);
+ do {
+ old.v = new.v = v;
+ BUG_ON(journal_state_count(new, new.unwritten_idx));
+
+ new.unwritten_idx++;
+ } while ((v = atomic64_cmpxchg(&j->reservations.counter,
+ old.v, new.v)) != old.v);
+
+ bch2_journal_space_available(j);
+
+ closure_wake_up(&w->wait);
+ journal_wake(j);
+
+ if (!journal_state_count(new, new.unwritten_idx) &&
+ journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
+ spin_unlock(&j->lock);
+ closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
+ } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
+ new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
+ struct journal_buf *buf = journal_cur_buf(j);
+ long delta = buf->expires - jiffies;
+
+ /*
+		 * We don't close a journal entry to write it while there are
+ * previous entries still in flight - the current journal entry
+ * might want to be written now:
+ */
+
+ spin_unlock(&j->lock);
+ mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
+ } else {
+ spin_unlock(&j->lock);
+ }
+}
+
+static void journal_write_endio(struct bio *bio)
+{
+ struct bch_dev *ca = bio->bi_private;
+ struct journal *j = &ca->fs->journal;
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ unsigned long flags;
+
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
+ le64_to_cpu(w->data->seq),
+ bch2_blk_status_to_str(bio->bi_status)) ||
+ bch2_meta_write_fault("journal")) {
+ spin_lock_irqsave(&j->err_lock, flags);
+ bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
+ spin_unlock_irqrestore(&j->err_lock, flags);
+ }
+
+ closure_put(&j->io);
+ percpu_ref_put(&ca->io_ref);
+}
+
+static void do_journal_write(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ struct bch_extent_ptr *ptr;
+ struct bio *bio;
+ unsigned sectors = vstruct_sectors(w->data, c->block_bits);
+
+ extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+ if (!percpu_ref_tryget(&ca->io_ref)) {
+ /* XXX: fix this */
+ bch_err(c, "missing device for journal write\n");
+ continue;
+ }
+
+ this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
+ sectors);
+
+ bio = ca->journal.bio;
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
+ bio->bi_iter.bi_sector = ptr->offset;
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = ca;
+
+ BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
+ ca->prev_journal_sector = bio->bi_iter.bi_sector;
+
+ if (!JSET_NO_FLUSH(w->data))
+ bio->bi_opf |= REQ_FUA;
+ if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
+ bio->bi_opf |= REQ_PREFLUSH;
+
+ bch2_bio_map(bio, w->data, sectors << 9);
+
+ trace_and_count(c, journal_write, bio);
+ closure_bio_submit(bio, cl);
+
+ ca->journal.bucket_seq[ca->journal.cur_idx] =
+ le64_to_cpu(w->data->seq);
+ }
+
+ continue_at(cl, journal_write_done, c->io_complete_wq);
+}
+
+static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
+{
+ struct jset_entry *i, *next, *prev = NULL;
+
+ /*
+ * Simple compaction, dropping empty jset_entries (from journal
+	 * reservations that weren't fully used) and merging adjacent
+	 * jset_entries where possible.
+ *
+ * If we wanted to be really fancy here, we could sort all the keys in
+ * the jset and drop keys that were overwritten - probably not worth it:
+ */
+ vstruct_for_each_safe(jset, i, next) {
+ unsigned u64s = le16_to_cpu(i->u64s);
+
+ /* Empty entry: */
+ if (!u64s)
+ continue;
+
+ if (i->type == BCH_JSET_ENTRY_btree_root)
+ bch2_journal_entry_to_btree_root(c, i);
+
+ /* Can we merge with previous entry? */
+ if (prev &&
+ i->btree_id == prev->btree_id &&
+ i->level == prev->level &&
+ i->type == prev->type &&
+ i->type == BCH_JSET_ENTRY_btree_keys &&
+ le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
+ memmove_u64s_down(vstruct_next(prev),
+ i->_data,
+ u64s);
+ le16_add_cpu(&prev->u64s, u64s);
+ continue;
+ }
+
+ /* Couldn't merge, move i into new position (after prev): */
+ prev = prev ? vstruct_next(prev) : jset->start;
+ if (i != prev)
+ memmove_u64s_down(prev, i, jset_u64s(u64s));
+ }
+
+ prev = prev ? vstruct_next(prev) : jset->start;
+ jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
+}
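+
+/*
+ * Example of the compaction above: two adjacent btree_keys entries for the
+ * same btree and level, say
+ *
+ *	{ btree_keys, extents, 4 u64s } { btree_keys, extents, 3 u64s }
+ *
+ * become a single { btree_keys, extents, 7 u64s } entry, while zero length
+ * entries left over from unused reservations disappear entirely.
+ */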
+
+void bch2_journal_write(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ struct bch_replicas_padded replicas;
+ struct jset_entry *start, *end;
+ struct jset *jset;
+ struct bio *bio;
+ struct printbuf journal_debug_buf = PRINTBUF;
+ bool validate_before_checksum = false;
+ unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
+ int ret;
+
+ BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
+
+ journal_buf_realloc(j, w);
+ jset = w->data;
+
+ j->write_start_time = local_clock();
+
+ spin_lock(&j->lock);
+
+ /*
+ * If the journal is in an error state - we did an emergency shutdown -
+ * we prefer to continue doing journal writes. We just mark them as
+	 * noflush so they'll never be used, but they'll still be visible to the
+ * list_journal tool - this helps in debugging.
+ *
+ * There's a caveat: the first journal write after marking the
+ * superblock dirty must always be a flush write, because on startup
+ * from a clean shutdown we didn't necessarily read the journal and the
+ * new journal write might overwrite whatever was in the journal
+ * previously - we can't leave the journal without any flush writes in
+ * it.
+ *
+ * So if we're in an error state, and we're still starting up, we don't
+ * write anything at all.
+ */
+ if (!test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags) &&
+ (bch2_journal_error(j) ||
+ w->noflush ||
+ (!w->must_flush &&
+ (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
+ test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
+ w->noflush = true;
+ SET_JSET_NO_FLUSH(jset, true);
+ jset->last_seq = 0;
+ w->last_seq = 0;
+
+ j->nr_noflush_writes++;
+ } else if (!bch2_journal_error(j)) {
+ j->last_flush_write = jiffies;
+ j->nr_flush_writes++;
+ clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
+ } else {
+ spin_unlock(&j->lock);
+ goto err;
+ }
+ spin_unlock(&j->lock);
+
+ /*
+ * New btree roots are set by journalling them; when the journal entry
+ * gets written we have to propagate them to c->btree_roots
+ *
+ * But, every journal entry we write has to contain all the btree roots
+ * (at least for now); so after we copy btree roots to c->btree_roots we
+ * have to get any missing btree roots and add them to this journal
+ * entry:
+ */
+
+ bch2_journal_entries_postprocess(c, jset);
+
+ start = end = vstruct_last(jset);
+
+ end = bch2_btree_roots_to_journal_entries(c, jset->start, end);
+
+ bch2_journal_super_entries_add_common(c, &end,
+ le64_to_cpu(jset->seq));
+ u64s = (u64 *) end - (u64 *) start;
+ BUG_ON(u64s > j->entry_u64s_reserved);
+
+ le32_add_cpu(&jset->u64s, u64s);
+
+ sectors = vstruct_sectors(jset, c->block_bits);
+ bytes = vstruct_bytes(jset);
+
+ if (sectors > w->sectors) {
+ bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
+ vstruct_bytes(jset), w->sectors << 9,
+ u64s, w->u64s_reserved, j->entry_u64s_reserved);
+ goto err;
+ }
+
+ jset->magic = cpu_to_le64(jset_magic(c));
+ jset->version = cpu_to_le32(c->sb.version);
+
+ SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
+ SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
+
+ if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
+ j->last_empty_seq = le64_to_cpu(jset->seq);
+
+ if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
+ validate_before_checksum = true;
+
+ if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
+ validate_before_checksum = true;
+
+ if (validate_before_checksum &&
+ jset_validate(c, NULL, jset, 0, WRITE))
+ goto err;
+
+ ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
+ jset->encrypted_start,
+ vstruct_end(jset) - (void *) jset->encrypted_start);
+ if (bch2_fs_fatal_err_on(ret, c,
+			"error encrypting journal entry: %i", ret))
+ goto err;
+
+ jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
+ journal_nonce(jset), jset);
+
+ if (!validate_before_checksum &&
+ jset_validate(c, NULL, jset, 0, WRITE))
+ goto err;
+
+ memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
+
+retry_alloc:
+ spin_lock(&j->lock);
+ ret = journal_write_alloc(j, w);
+
+ if (ret && j->can_discard) {
+ spin_unlock(&j->lock);
+ bch2_journal_do_discards(j);
+ goto retry_alloc;
+ }
+
+ if (ret)
+ __bch2_journal_debug_to_text(&journal_debug_buf, j);
+
+ /*
+ * write is allocated, no longer need to account for it in
+ * bch2_journal_space_available():
+ */
+ w->sectors = 0;
+
+ /*
+ * journal entry has been compacted and allocated, recalculate space
+ * available:
+ */
+ bch2_journal_space_available(j);
+ spin_unlock(&j->lock);
+
+ if (ret) {
+ bch_err(c, "Unable to allocate journal write:\n%s",
+ journal_debug_buf.buf);
+ printbuf_exit(&journal_debug_buf);
+ goto err;
+ }
+
+ w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
+
+ if (c->opts.nochanges)
+ goto no_io;
+
+ for_each_rw_member(ca, c, i)
+ nr_rw_members++;
+
+ if (nr_rw_members > 1)
+ w->separate_flush = true;
+
+ /*
+ * Mark journal replicas before we submit the write to guarantee
+ * recovery will find the journal entries after a crash.
+ */
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
+ w->devs_written);
+ ret = bch2_mark_replicas(c, &replicas.e);
+ if (ret)
+ goto err;
+
+ if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
+ for_each_rw_member(ca, c, i) {
+ percpu_ref_get(&ca->io_ref);
+
+ bio = ca->journal.bio;
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = ca;
+ closure_bio_submit(bio, cl);
+ }
+ }
+
+ continue_at(cl, do_journal_write, c->io_complete_wq);
+ return;
+no_io:
+ continue_at(cl, journal_write_done, c->io_complete_wq);
+ return;
+err:
+ bch2_fatal_error(c);
+ continue_at(cl, journal_write_done, c->io_complete_wq);
+}
diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h
new file mode 100644
index 000000000000..a88d097b13f1
--- /dev/null
+++ b/fs/bcachefs/journal_io.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_IO_H
+#define _BCACHEFS_JOURNAL_IO_H
+
+/*
+ * Only used for holding the journal entries we read in btree_journal_read()
+ * during cache_registration
+ */
+struct journal_replay {
+ struct journal_ptr {
+ bool csum_good;
+ u8 dev;
+ u32 bucket;
+ u32 bucket_offset;
+ u64 sector;
+ } ptrs[BCH_REPLICAS_MAX];
+ unsigned nr_ptrs;
+
+ bool csum_good;
+ bool ignore;
+ /* must be last: */
+ struct jset j;
+};
+
+static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
+ struct jset_entry *entry, unsigned type)
+{
+ while (entry < vstruct_last(jset)) {
+ if (entry->type == type)
+ return entry;
+
+ entry = vstruct_next(entry);
+ }
+
+ return NULL;
+}
+
+#define for_each_jset_entry_type(entry, jset, type) \
+ for (entry = (jset)->start; \
+ (entry = __jset_entry_type_next(jset, entry, type)); \
+ entry = vstruct_next(entry))
+
+#define jset_entry_for_each_key(_e, _k) \
+ for (_k = (_e)->start; \
+ _k < vstruct_last(_e); \
+ _k = bkey_next(_k))
+
+#define for_each_jset_key(k, entry, jset) \
+ for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
+ jset_entry_for_each_key(entry, k)
+
+int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
+ struct jset_entry *, unsigned, int,
+ enum bkey_invalid_flags);
+void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *,
+ struct jset_entry *);
+
+void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
+ struct journal_replay *);
+
+int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);
+
+void bch2_journal_write(struct closure *);
+
+#endif /* _BCACHEFS_JOURNAL_IO_H */
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
new file mode 100644
index 000000000000..9a584aaaa2eb
--- /dev/null
+++ b/fs/bcachefs/journal_reclaim.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_key_cache.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "errcode.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_io.h"
+#include "journal_reclaim.h"
+#include "replicas.h"
+#include "sb-members.h"
+#include "trace.h"
+
+#include <linux/kthread.h>
+#include <linux/sched/mm.h>
+
+/* Free space calculations: */
+
+static unsigned journal_space_from(struct journal_device *ja,
+ enum journal_space_from from)
+{
+ switch (from) {
+ case journal_space_discarded:
+ return ja->discard_idx;
+ case journal_space_clean_ondisk:
+ return ja->dirty_idx_ondisk;
+ case journal_space_clean:
+ return ja->dirty_idx;
+ default:
+ BUG();
+ }
+}
+
+unsigned bch2_journal_dev_buckets_available(struct journal *j,
+ struct journal_device *ja,
+ enum journal_space_from from)
+{
+ unsigned available = (journal_space_from(ja, from) -
+ ja->cur_idx - 1 + ja->nr) % ja->nr;
+
+ /*
+ * Don't use the last bucket unless writing the new last_seq
+ * will make another bucket available:
+ */
+ if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
+ --available;
+
+ return available;
+}
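+
+/*
+ * For illustration (hypothetical values): ja->nr = 8, ja->cur_idx = 5 and
+ * journal_space_from() returning 2 gives
+ *
+ *	available = (2 - 5 - 1 + 8) % 8 = 4
+ *
+ * buckets; and if ja->dirty_idx_ondisk == ja->dirty_idx, the last bucket is
+ * held back, leaving 3.
+ */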
+
+static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
+{
+ union journal_preres_state old, new;
+ u64 v = atomic64_read(&j->prereserved.counter);
+
+ do {
+ old.v = new.v = v;
+ new.remaining = u64s_remaining;
+ } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
+ old.v, new.v)) != old.v);
+}
+
+static struct journal_space
+journal_dev_space_available(struct journal *j, struct bch_dev *ca,
+ enum journal_space_from from)
+{
+ struct journal_device *ja = &ca->journal;
+ unsigned sectors, buckets, unwritten;
+ u64 seq;
+
+ if (from == journal_space_total)
+ return (struct journal_space) {
+ .next_entry = ca->mi.bucket_size,
+ .total = ca->mi.bucket_size * ja->nr,
+ };
+
+ buckets = bch2_journal_dev_buckets_available(j, ja, from);
+ sectors = ja->sectors_free;
+
+ /*
+	 * Note that we don't allocate the space for a journal entry
+ * until we write it out - thus, account for it here:
+ */
+ for (seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j);
+ seq++) {
+ unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
+
+ if (!unwritten)
+ continue;
+
+ /* entry won't fit on this device, skip: */
+ if (unwritten > ca->mi.bucket_size)
+ continue;
+
+ if (unwritten >= sectors) {
+ if (!buckets) {
+ sectors = 0;
+ break;
+ }
+
+ buckets--;
+ sectors = ca->mi.bucket_size;
+ }
+
+ sectors -= unwritten;
+ }
+
+ if (sectors < ca->mi.bucket_size && buckets) {
+ buckets--;
+ sectors = ca->mi.bucket_size;
+ }
+
+ return (struct journal_space) {
+ .next_entry = sectors,
+ .total = sectors + buckets * ca->mi.bucket_size,
+ };
+}
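+
+/*
+ * Worked example (hypothetical values): bucket_size = 512, sectors_free = 100,
+ * buckets = 3, and one unwritten entry of 200 sectors. The entry doesn't fit
+ * in the 100 free sectors, so it's charged to a fresh bucket (buckets = 2,
+ * sectors = 512 - 200 = 312); 312 is less than a full bucket, so the next
+ * entry is also sized to a fresh bucket, and we return
+ * { .next_entry = 512, .total = 512 + 1 * 512 = 1024 }.
+ */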
+
+static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
+ enum journal_space_from from)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned i, pos, nr_devs = 0;
+ struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
+
+ BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ if (!ca->journal.nr)
+ continue;
+
+ space = journal_dev_space_available(j, ca, from);
+ if (!space.next_entry)
+ continue;
+
+ for (pos = 0; pos < nr_devs; pos++)
+ if (space.total > dev_space[pos].total)
+ break;
+
+ array_insert_item(dev_space, nr_devs, pos, space);
+ }
+ rcu_read_unlock();
+
+ if (nr_devs < nr_devs_want)
+ return (struct journal_space) { 0, 0 };
+
+ /*
+ * We sorted largest to smallest, and we want the smallest out of the
+ * @nr_devs_want largest devices:
+ */
+ return dev_space[nr_devs_want - 1];
+}
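+
+/*
+ * E.g. (hypothetical): three devices with totals 100, 80 and 50 sort to
+ * dev_space[] = { 100, 80, 50 }; with nr_devs_want = 2 we return the entry
+ * with total 80 - the most space we can use while still writing every
+ * journal entry to two devices.
+ */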
+
+void bch2_journal_space_available(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned clean, clean_ondisk, total;
+ s64 u64s_remaining = 0;
+ unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
+ j->buf[1].buf_size >> 9);
+ unsigned i, nr_online = 0, nr_devs_want;
+ bool can_discard = false;
+ int ret = 0;
+
+ lockdep_assert_held(&j->lock);
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ struct journal_device *ja = &ca->journal;
+
+ if (!ja->nr)
+ continue;
+
+ while (ja->dirty_idx != ja->cur_idx &&
+ ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
+ ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
+
+ while (ja->dirty_idx_ondisk != ja->dirty_idx &&
+ ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
+ ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
+
+ if (ja->discard_idx != ja->dirty_idx_ondisk)
+ can_discard = true;
+
+ max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
+ nr_online++;
+ }
+ rcu_read_unlock();
+
+ j->can_discard = can_discard;
+
+ if (nr_online < c->opts.metadata_replicas_required) {
+ ret = JOURNAL_ERR_insufficient_devices;
+ goto out;
+ }
+
+ nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
+
+ for (i = 0; i < journal_space_nr; i++)
+ j->space[i] = __journal_space_available(j, nr_devs_want, i);
+
+ clean_ondisk = j->space[journal_space_clean_ondisk].total;
+ clean = j->space[journal_space_clean].total;
+ total = j->space[journal_space_total].total;
+
+ if (!j->space[journal_space_discarded].next_entry)
+ ret = JOURNAL_ERR_journal_full;
+
+ if ((j->space[journal_space_clean_ondisk].next_entry <
+ j->space[journal_space_clean_ondisk].total) &&
+ (clean - clean_ondisk <= total / 8) &&
+ (clean_ondisk * 2 > clean))
+ set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+ else
+ clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+
+ u64s_remaining = (u64) clean << 6;
+ u64s_remaining -= (u64) total << 3;
+ u64s_remaining = max(0LL, u64s_remaining);
+ u64s_remaining /= 4;
+ u64s_remaining = min_t(u64, u64s_remaining, U32_MAX);
+out:
+ j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
+ j->cur_entry_error = ret;
+ journal_set_remaining(j, u64s_remaining);
+ journal_set_watermark(j);
+
+ if (!ret)
+ journal_wake(j);
+}
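+
+/*
+ * A note on the prereserved-space computation above: it works out to
+ *
+ *	remaining = max(0, clean * 64 - total * 8) / 4
+ *
+ * in u64s (a 512 byte sector holds 64 u64s) - i.e. a quarter of the clean
+ * space, less 1/8th of the total journal size, clamped to U32_MAX. E.g.
+ * (hypothetical) clean = 1000 and total = 4000 sectors gives
+ * (64000 - 32000) / 4 = 8000 u64s of prereserved space; it hits zero once
+ * less than 1/8th of the journal is clean.
+ */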
+
+/* Discards - last part of journal reclaim: */
+
+static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
+{
+ bool ret;
+
+ spin_lock(&j->lock);
+ ret = ja->discard_idx != ja->dirty_idx_ondisk;
+ spin_unlock(&j->lock);
+
+ return ret;
+}
+
+/*
+ * Advance ja->discard_idx as long as it points to buckets that are no longer
+ * dirty, issuing discards if necessary:
+ */
+void bch2_journal_do_discards(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned iter;
+
+ mutex_lock(&j->discard_lock);
+
+ for_each_rw_member(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+
+ while (should_discard_bucket(j, ja)) {
+ if (!c->opts.nochanges &&
+ ca->mi.discard &&
+ bdev_max_discard_sectors(ca->disk_sb.bdev))
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ bucket_to_sector(ca,
+ ja->buckets[ja->discard_idx]),
+ ca->mi.bucket_size, GFP_NOFS);
+
+ spin_lock(&j->lock);
+ ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
+
+ bch2_journal_space_available(j);
+ spin_unlock(&j->lock);
+ }
+ }
+
+ mutex_unlock(&j->discard_lock);
+}
+
+/*
+ * Journal entry pinning - machinery for holding a reference on a given journal
+ * entry, holding it open to ensure it gets replayed during recovery:
+ */
+
+void bch2_journal_reclaim_fast(struct journal *j)
+{
+ bool popped = false;
+
+ lockdep_assert_held(&j->lock);
+
+ /*
+ * Unpin journal entries whose reference counts reached zero, meaning
+ * all btree nodes got written out
+ */
+ while (!fifo_empty(&j->pin) &&
+ !atomic_read(&fifo_peek_front(&j->pin).count)) {
+ j->pin.front++;
+ popped = true;
+ }
+
+ if (popped)
+ bch2_journal_space_available(j);
+}
+
+bool __bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+ return atomic_dec_and_test(&pin_list->count);
+}
+
+void bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ if (__bch2_journal_pin_put(j, seq)) {
+ spin_lock(&j->lock);
+ bch2_journal_reclaim_fast(j);
+ spin_unlock(&j->lock);
+ }
+}
+
+static inline bool __journal_pin_drop(struct journal *j,
+ struct journal_entry_pin *pin)
+{
+ struct journal_entry_pin_list *pin_list;
+
+ if (!journal_pin_active(pin))
+ return false;
+
+ if (j->flush_in_progress == pin)
+ j->flush_in_progress_dropped = true;
+
+ pin_list = journal_seq_pin(j, pin->seq);
+ pin->seq = 0;
+ list_del_init(&pin->list);
+
+ /*
+ * Unpinning a journal entry may make journal_next_bucket() succeed, if
+ * writing a new last_seq will now make another bucket available:
+ */
+ return atomic_dec_and_test(&pin_list->count) &&
+ pin_list == &fifo_peek_front(&j->pin);
+}
+
+void bch2_journal_pin_drop(struct journal *j,
+ struct journal_entry_pin *pin)
+{
+ spin_lock(&j->lock);
+ if (__journal_pin_drop(j, pin))
+ bch2_journal_reclaim_fast(j);
+ spin_unlock(&j->lock);
+}
+
+static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
+{
+ if (fn == bch2_btree_node_flush0 ||
+ fn == bch2_btree_node_flush1)
+ return JOURNAL_PIN_btree;
+ else if (fn == bch2_btree_key_cache_journal_flush)
+ return JOURNAL_PIN_key_cache;
+ else
+ return JOURNAL_PIN_other;
+}
+
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
+{
+ struct journal_entry_pin_list *pin_list;
+ bool reclaim;
+
+ spin_lock(&j->lock);
+
+ if (seq < journal_last_seq(j)) {
+ /*
+ * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
+ * the src pin - with the pin dropped, the entry to pin might no
+		 * longer exist, but that means there's no longer anything to
+ * copy and we can bail out here:
+ */
+ spin_unlock(&j->lock);
+ return;
+ }
+
+ pin_list = journal_seq_pin(j, seq);
+
+ reclaim = __journal_pin_drop(j, pin);
+
+ atomic_inc(&pin_list->count);
+ pin->seq = seq;
+ pin->flush = flush_fn;
+
+ if (flush_fn)
+ list_add(&pin->list, &pin_list->list[journal_pin_type(flush_fn)]);
+ else
+ list_add(&pin->list, &pin_list->flushed);
+
+ if (reclaim)
+ bch2_journal_reclaim_fast(j);
+ spin_unlock(&j->lock);
+
+ /*
+ * If the journal is currently full, we might want to call flush_fn
+ * immediately:
+ */
+ journal_wake(j);
+}
+
+/**
+ * bch2_journal_pin_flush: ensure journal pin callback is no longer running
+ * @j: journal object
+ * @pin: pin to flush
+ */
+void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
+{
+ BUG_ON(journal_pin_active(pin));
+
+ wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
+}
+
+/*
+ * Journal reclaim: flush references to open journal entries to reclaim space in
+ * the journal
+ *
+ * May be done by the journal code in the background as needed to free up space
+ * for more journal entries, or as part of doing a clean shutdown, or to migrate
+ * data off of a specific device:
+ */
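+
+/*
+ * For illustration: a clean shutdown ends up flushing every pin via
+ *
+ *	bch2_journal_flush_all_pins(&c->journal);
+ *
+ * while bch2_journal_flush_device_pins() flushes only entries still pinned on
+ * a particular device (or under-replicated entries, if dev_idx < 0).
+ */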
+
+static struct journal_entry_pin *
+journal_get_next_pin(struct journal *j,
+ u64 seq_to_flush,
+ unsigned allowed_below_seq,
+ unsigned allowed_above_seq,
+ u64 *seq)
+{
+ struct journal_entry_pin_list *pin_list;
+ struct journal_entry_pin *ret = NULL;
+ unsigned i;
+
+ fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
+ if (*seq > seq_to_flush && !allowed_above_seq)
+ break;
+
+ for (i = 0; i < JOURNAL_PIN_NR; i++)
+ if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
+ ((1U << i) & allowed_above_seq)) {
+ ret = list_first_entry_or_null(&pin_list->list[i],
+ struct journal_entry_pin, list);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return NULL;
+}
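+
+/*
+ * The masks are per-pin-type bitmasks: e.g. allowed_below_seq =
+ * 1U << JOURNAL_PIN_key_cache with allowed_above_seq = 0 flushes only key
+ * cache pins, and only up to seq_to_flush; passing ~0 for both makes every
+ * pin type eligible regardless of sequence number.
+ */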
+
+/* returns the number of journal pins flushed: */
+static size_t journal_flush_pins(struct journal *j,
+ u64 seq_to_flush,
+ unsigned allowed_below_seq,
+ unsigned allowed_above_seq,
+ unsigned min_any,
+ unsigned min_key_cache)
+{
+ struct journal_entry_pin *pin;
+ size_t nr_flushed = 0;
+ journal_pin_flush_fn flush_fn;
+ u64 seq;
+ int err;
+
+ lockdep_assert_held(&j->reclaim_lock);
+
+ while (1) {
+ unsigned allowed_above = allowed_above_seq;
+ unsigned allowed_below = allowed_below_seq;
+
+ if (min_any) {
+ allowed_above |= ~0;
+ allowed_below |= ~0;
+ }
+
+ if (min_key_cache) {
+ allowed_above |= 1U << JOURNAL_PIN_key_cache;
+ allowed_below |= 1U << JOURNAL_PIN_key_cache;
+ }
+
+ cond_resched();
+
+ j->last_flushed = jiffies;
+
+ spin_lock(&j->lock);
+ pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
+ if (pin) {
+ BUG_ON(j->flush_in_progress);
+ j->flush_in_progress = pin;
+ j->flush_in_progress_dropped = false;
+ flush_fn = pin->flush;
+ }
+ spin_unlock(&j->lock);
+
+ if (!pin)
+ break;
+
+ if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
+ min_key_cache--;
+
+ if (min_any)
+ min_any--;
+
+ err = flush_fn(j, pin, seq);
+
+ spin_lock(&j->lock);
+ /* Pin might have been dropped or rearmed: */
+ if (likely(!err && !j->flush_in_progress_dropped))
+ list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
+ j->flush_in_progress = NULL;
+ j->flush_in_progress_dropped = false;
+ spin_unlock(&j->lock);
+
+ wake_up(&j->pin_flush_wait);
+
+ if (err)
+ break;
+
+ nr_flushed++;
+ }
+
+ return nr_flushed;
+}
+
+static u64 journal_seq_to_flush(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ u64 seq_to_flush = 0;
+ unsigned iter;
+
+ spin_lock(&j->lock);
+
+ for_each_rw_member(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+ unsigned nr_buckets, bucket_to_flush;
+
+ if (!ja->nr)
+ continue;
+
+ /* Try to keep the journal at most half full: */
+ nr_buckets = ja->nr / 2;
+
+ /* And include pre-reservations: */
+ nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
+ (ca->mi.bucket_size << 6) -
+ journal_entry_overhead(j));
+
+ nr_buckets = min(nr_buckets, ja->nr);
+
+ bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
+ seq_to_flush = max(seq_to_flush,
+ ja->bucket_seq[bucket_to_flush]);
+ }
+
+ /* Also flush if the pin fifo is more than half full */
+ seq_to_flush = max_t(s64, seq_to_flush,
+ (s64) journal_cur_seq(j) -
+ (j->pin.size >> 1));
+ spin_unlock(&j->lock);
+
+ return seq_to_flush;
+}
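+
+/*
+ * E.g. (hypothetical values): ja->nr = 16 with no prereservations gives
+ * nr_buckets = 8, so with ja->cur_idx = 3 we flush up to ja->bucket_seq[11] -
+ * i.e. until at most half the buckets on that device are dirty (the pin-FIFO
+ * term may push seq_to_flush higher still).
+ */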
+
+/**
+ * __bch2_journal_reclaim - free up journal buckets
+ * @j: journal object
+ * @direct: direct or background reclaim?
+ * @kicked: requested to run since we last ran?
+ * Returns: 0 on success, or -EIO if the journal has been shutdown
+ *
+ * Background journal reclaim writes out btree nodes. It should be run
+ * early enough so that we never completely run out of journal buckets.
+ *
+ * High watermarks for triggering background reclaim:
+ * - FIFO has fewer than 512 entries left
+ * - fewer than 25% journal buckets free
+ *
+ * Background reclaim runs until low watermarks are reached:
+ * - FIFO has more than 1024 entries left
+ * - more than 50% journal buckets free
+ *
+ * As long as a reclaim can complete in the time it takes to fill up
+ * 512 journal entries or 25% of all journal buckets, then
+ * journal_next_bucket() should not stall.
+ */
+static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
+ u64 seq_to_flush;
+ size_t min_nr, min_key_cache, nr_flushed;
+ unsigned flags;
+ int ret = 0;
+
+ /*
+ * We can't invoke memory reclaim while holding the reclaim_lock -
+ * journal reclaim is required to make progress for memory reclaim
+ * (cleaning the caches), so we can't get stuck in memory reclaim while
+ * we're holding the reclaim lock:
+ */
+ lockdep_assert_held(&j->reclaim_lock);
+ flags = memalloc_noreclaim_save();
+
+ do {
+ if (kthread && kthread_should_stop())
+ break;
+
+ if (bch2_journal_error(j)) {
+ ret = -EIO;
+ break;
+ }
+
+ bch2_journal_do_discards(j);
+
+ seq_to_flush = journal_seq_to_flush(j);
+ min_nr = 0;
+
+ /*
+		 * If it's been longer than journal_reclaim_delay since we last flushed,
+ * make sure to flush at least one journal pin:
+ */
+ if (time_after(jiffies, j->last_flushed +
+ msecs_to_jiffies(c->opts.journal_reclaim_delay)))
+ min_nr = 1;
+
+ if (j->prereserved.reserved * 4 > j->prereserved.remaining)
+ min_nr = 1;
+
+ if (fifo_free(&j->pin) <= 32)
+ min_nr = 1;
+
+ if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+ min_nr = 1;
+
+ min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
+
+ trace_and_count(c, journal_reclaim_start, c,
+ direct, kicked,
+ min_nr, min_key_cache,
+ j->prereserved.reserved,
+ j->prereserved.remaining,
+ atomic_read(&c->btree_cache.dirty),
+ c->btree_cache.used,
+ atomic_long_read(&c->btree_key_cache.nr_dirty),
+ atomic_long_read(&c->btree_key_cache.nr_keys));
+
+ nr_flushed = journal_flush_pins(j, seq_to_flush,
+ ~0, 0,
+ min_nr, min_key_cache);
+
+ if (direct)
+ j->nr_direct_reclaim += nr_flushed;
+ else
+ j->nr_background_reclaim += nr_flushed;
+ trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
+
+ if (nr_flushed)
+ wake_up(&j->reclaim_wait);
+ } while ((min_nr || min_key_cache) && nr_flushed && !direct);
+
+ memalloc_noreclaim_restore(flags);
+
+ return ret;
+}
+
+int bch2_journal_reclaim(struct journal *j)
+{
+ return __bch2_journal_reclaim(j, true, true);
+}
+
+static int bch2_journal_reclaim_thread(void *arg)
+{
+ struct journal *j = arg;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ unsigned long delay, now;
+ bool journal_empty;
+ int ret = 0;
+
+ set_freezable();
+
+ j->last_flushed = jiffies;
+
+ while (!ret && !kthread_should_stop()) {
+ bool kicked = j->reclaim_kicked;
+
+ j->reclaim_kicked = false;
+
+ mutex_lock(&j->reclaim_lock);
+ ret = __bch2_journal_reclaim(j, false, kicked);
+ mutex_unlock(&j->reclaim_lock);
+
+ now = jiffies;
+ delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
+ j->next_reclaim = j->last_flushed + delay;
+
+ if (!time_in_range(j->next_reclaim, now, now + delay))
+ j->next_reclaim = now + delay;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
+ if (kthread_should_stop())
+ break;
+ if (j->reclaim_kicked)
+ break;
+
+ spin_lock(&j->lock);
+ journal_empty = fifo_empty(&j->pin);
+ spin_unlock(&j->lock);
+
+ if (journal_empty)
+ schedule();
+ else if (time_after(j->next_reclaim, jiffies))
+ schedule_timeout(j->next_reclaim - jiffies);
+ else
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
+ }
+
+ return 0;
+}
+
+void bch2_journal_reclaim_stop(struct journal *j)
+{
+ struct task_struct *p = j->reclaim_thread;
+
+ j->reclaim_thread = NULL;
+
+ if (p) {
+ kthread_stop(p);
+ put_task_struct(p);
+ }
+}
+
+int bch2_journal_reclaim_start(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct task_struct *p;
+ int ret;
+
+ if (j->reclaim_thread)
+ return 0;
+
+ p = kthread_create(bch2_journal_reclaim_thread, j,
+ "bch-reclaim/%s", c->name);
+ ret = PTR_ERR_OR_ZERO(p);
+ if (ret) {
+ bch_err_msg(c, ret, "creating journal reclaim thread");
+ return ret;
+ }
+
+ get_task_struct(p);
+ j->reclaim_thread = p;
+ wake_up_process(p);
+ return 0;
+}
+
+static int journal_flush_done(struct journal *j, u64 seq_to_flush,
+ bool *did_work)
+{
+ int ret;
+
+ ret = bch2_journal_error(j);
+ if (ret)
+ return ret;
+
+ mutex_lock(&j->reclaim_lock);
+
+ if (journal_flush_pins(j, seq_to_flush,
+ (1U << JOURNAL_PIN_key_cache)|
+ (1U << JOURNAL_PIN_other), 0, 0, 0) ||
+ journal_flush_pins(j, seq_to_flush,
+ (1U << JOURNAL_PIN_btree), 0, 0, 0))
+ *did_work = true;
+
+ spin_lock(&j->lock);
+ /*
+ * If journal replay hasn't completed, the unreplayed journal entries
+ * hold refs on their corresponding sequence numbers
+ */
+ ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
+ journal_last_seq(j) > seq_to_flush ||
+ !fifo_used(&j->pin);
+
+ spin_unlock(&j->lock);
+ mutex_unlock(&j->reclaim_lock);
+
+ return ret;
+}
+
+bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
+{
+ bool did_work = false;
+
+ if (!test_bit(JOURNAL_STARTED, &j->flags))
+ return false;
+
+ closure_wait_event(&j->async_wait,
+ journal_flush_done(j, seq_to_flush, &did_work));
+
+ return did_work;
+}
+
+int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct journal_entry_pin_list *p;
+ u64 iter, seq = 0;
+ int ret = 0;
+
+ spin_lock(&j->lock);
+ fifo_for_each_entry_ptr(p, &j->pin, iter)
+ if (dev_idx >= 0
+ ? bch2_dev_list_has_dev(p->devs, dev_idx)
+ : p->devs.nr < c->opts.metadata_replicas)
+ seq = iter;
+ spin_unlock(&j->lock);
+
+ bch2_journal_flush_pins(j, seq);
+
+ ret = bch2_journal_error(j);
+ if (ret)
+ return ret;
+
+ mutex_lock(&c->replicas_gc_lock);
+ bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
+
+ /*
+ * Now that we've populated replicas_gc, write to the journal to mark
+ * active journal devices. This handles the case where the journal might
+ * be empty. Otherwise we could clear all journal replicas and
+ * temporarily put the fs into an unrecoverable state. Journal recovery
+ * expects to find devices marked for journal data on unclean mount.
+ */
+ ret = bch2_journal_meta(&c->journal);
+ if (ret)
+ goto err;
+
+ seq = 0;
+ spin_lock(&j->lock);
+ while (!ret) {
+ struct bch_replicas_padded replicas;
+
+ seq = max(seq, journal_last_seq(j));
+ if (seq >= j->pin.back)
+ break;
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
+ journal_seq_pin(j, seq)->devs);
+ seq++;
+
+ spin_unlock(&j->lock);
+ ret = bch2_mark_replicas(c, &replicas.e);
+ spin_lock(&j->lock);
+ }
+ spin_unlock(&j->lock);
+err:
+ ret = bch2_replicas_gc_end(c, ret);
+ mutex_unlock(&c->replicas_gc_lock);
+
+ return ret;
+}
diff --git a/fs/bcachefs/journal_reclaim.h b/fs/bcachefs/journal_reclaim.h
new file mode 100644
index 000000000000..494d1a6eddb0
--- /dev/null
+++ b/fs/bcachefs/journal_reclaim.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_RECLAIM_H
+#define _BCACHEFS_JOURNAL_RECLAIM_H
+
+#define JOURNAL_PIN (32 * 1024)
+
+static inline void journal_reclaim_kick(struct journal *j)
+{
+ struct task_struct *p = READ_ONCE(j->reclaim_thread);
+
+ j->reclaim_kicked = true;
+ if (p)
+ wake_up_process(p);
+}
+
+unsigned bch2_journal_dev_buckets_available(struct journal *,
+ struct journal_device *,
+ enum journal_space_from);
+void bch2_journal_space_available(struct journal *);
+
+static inline bool journal_pin_active(struct journal_entry_pin *pin)
+{
+ return pin->seq != 0;
+}
+
+static inline struct journal_entry_pin_list *
+journal_seq_pin(struct journal *j, u64 seq)
+{
+ EBUG_ON(seq < j->pin.front || seq >= j->pin.back);
+
+ return &j->pin.data[seq & j->pin.mask];
+}
+
+void bch2_journal_reclaim_fast(struct journal *);
+bool __bch2_journal_pin_put(struct journal *, u64);
+void bch2_journal_pin_put(struct journal *, u64);
+void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *);
+
+void bch2_journal_pin_set(struct journal *, u64, struct journal_entry_pin *,
+ journal_pin_flush_fn);
+
+static inline void bch2_journal_pin_add(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
+{
+ if (unlikely(!journal_pin_active(pin) || pin->seq > seq))
+ bch2_journal_pin_set(j, seq, pin, flush_fn);
+}
+
+static inline void bch2_journal_pin_copy(struct journal *j,
+ struct journal_entry_pin *dst,
+ struct journal_entry_pin *src,
+ journal_pin_flush_fn flush_fn)
+{
+ /* Guard against racing with journal_pin_drop(src): */
+ u64 seq = READ_ONCE(src->seq);
+
+ if (seq)
+ bch2_journal_pin_add(j, seq, dst, flush_fn);
+}
+
+static inline void bch2_journal_pin_update(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
+{
+ if (unlikely(!journal_pin_active(pin) || pin->seq < seq))
+ bch2_journal_pin_set(j, seq, pin, flush_fn);
+}
+
+void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
+
+void bch2_journal_do_discards(struct journal *);
+int bch2_journal_reclaim(struct journal *);
+
+void bch2_journal_reclaim_stop(struct journal *);
+int bch2_journal_reclaim_start(struct journal *);
+
+bool bch2_journal_flush_pins(struct journal *, u64);
+
+static inline bool bch2_journal_flush_all_pins(struct journal *j)
+{
+ return bch2_journal_flush_pins(j, U64_MAX);
+}
+
+int bch2_journal_flush_device_pins(struct journal *, int);
+
+#endif /* _BCACHEFS_JOURNAL_RECLAIM_H */
diff --git a/fs/bcachefs/journal_sb.c b/fs/bcachefs/journal_sb.c
new file mode 100644
index 000000000000..ae4fb8c3a2bc
--- /dev/null
+++ b/fs/bcachefs/journal_sb.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "journal_sb.h"
+#include "darray.h"
+
+#include <linux/sort.h>
+
+/* BCH_SB_FIELD_journal: */
+
+static int u64_cmp(const void *_l, const void *_r)
+{
+ const u64 *l = _l;
+ const u64 *r = _r;
+
+ return cmp_int(*l, *r);
+}
+
+static int bch2_sb_journal_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_journal *journal = field_to_type(f, journal);
+ struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
+ int ret = -BCH_ERR_invalid_sb_journal;
+ unsigned nr;
+ unsigned i;
+ u64 *b;
+
+ nr = bch2_nr_journal_buckets(journal);
+ if (!nr)
+ return 0;
+
+ b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
+ if (!b)
+ return -BCH_ERR_ENOMEM_sb_journal_validate;
+
+ for (i = 0; i < nr; i++)
+ b[i] = le64_to_cpu(journal->buckets[i]);
+
+ sort(b, nr, sizeof(u64), u64_cmp, NULL);
+
+ if (!b[0]) {
+ prt_printf(err, "journal bucket at sector 0");
+ goto err;
+ }
+
+ if (b[0] < le16_to_cpu(m.first_bucket)) {
+ prt_printf(err, "journal bucket %llu before first bucket %u",
+ b[0], le16_to_cpu(m.first_bucket));
+ goto err;
+ }
+
+ if (b[nr - 1] >= le64_to_cpu(m.nbuckets)) {
+ prt_printf(err, "journal bucket %llu past end of device (nbuckets %llu)",
+ b[nr - 1], le64_to_cpu(m.nbuckets));
+ goto err;
+ }
+
+ for (i = 0; i + 1 < nr; i++)
+ if (b[i] == b[i + 1]) {
+ prt_printf(err, "duplicate journal buckets %llu", b[i]);
+ goto err;
+ }
+
+ ret = 0;
+err:
+ kfree(b);
+ return ret;
+}
+
+static void bch2_sb_journal_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_journal *journal = field_to_type(f, journal);
+ unsigned i, nr = bch2_nr_journal_buckets(journal);
+
+ prt_printf(out, "Buckets: ");
+ for (i = 0; i < nr; i++)
+ prt_printf(out, " %llu", le64_to_cpu(journal->buckets[i]));
+ prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_journal = {
+ .validate = bch2_sb_journal_validate,
+ .to_text = bch2_sb_journal_to_text,
+};
+
+struct u64_range {
+ u64 start;
+ u64 end;
+};
+
+static int u64_range_cmp(const void *_l, const void *_r)
+{
+ const struct u64_range *l = _l;
+ const struct u64_range *r = _r;
+
+ return cmp_int(l->start, r->start);
+}
+
+static int bch2_sb_journal_v2_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2);
+ struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
+ int ret = -BCH_ERR_invalid_sb_journal;
+ unsigned nr;
+ unsigned i;
+ struct u64_range *b;
+
+ nr = bch2_sb_field_journal_v2_nr_entries(journal);
+ if (!nr)
+ return 0;
+
+ b = kmalloc_array(nr, sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return -BCH_ERR_ENOMEM_sb_journal_v2_validate;
+
+ for (i = 0; i < nr; i++) {
+ b[i].start = le64_to_cpu(journal->d[i].start);
+ b[i].end = b[i].start + le64_to_cpu(journal->d[i].nr);
+ }
+
+ sort(b, nr, sizeof(*b), u64_range_cmp, NULL);
+
+ if (!b[0].start) {
+ prt_printf(err, "journal bucket at sector 0");
+ goto err;
+ }
+
+ if (b[0].start < le16_to_cpu(m.first_bucket)) {
+ prt_printf(err, "journal bucket %llu before first bucket %u",
+ b[0].start, le16_to_cpu(m.first_bucket));
+ goto err;
+ }
+
+ if (b[nr - 1].end > le64_to_cpu(m.nbuckets)) {
+ prt_printf(err, "journal bucket %llu past end of device (nbuckets %llu)",
+ b[nr - 1].end - 1, le64_to_cpu(m.nbuckets));
+ goto err;
+ }
+
+ for (i = 0; i + 1 < nr; i++) {
+ if (b[i].end > b[i + 1].start) {
+ prt_printf(err, "duplicate journal buckets in ranges %llu-%llu, %llu-%llu",
+ b[i].start, b[i].end, b[i + 1].start, b[i + 1].end);
+ goto err;
+ }
+ }
+
+ ret = 0;
+err:
+ kfree(b);
+ return ret;
+}
+
+static void bch2_sb_journal_v2_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2);
+ unsigned i, nr = bch2_sb_field_journal_v2_nr_entries(journal);
+
+ prt_printf(out, "Buckets: ");
+ for (i = 0; i < nr; i++)
+ prt_printf(out, " %llu-%llu",
+ le64_to_cpu(journal->d[i].start),
+ le64_to_cpu(journal->d[i].start) + le64_to_cpu(journal->d[i].nr));
+ prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_journal_v2 = {
+ .validate = bch2_sb_journal_v2_validate,
+ .to_text = bch2_sb_journal_v2_to_text,
+};
+
+int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca,
+ u64 *buckets, unsigned nr)
+{
+ struct bch_sb_field_journal_v2 *j;
+ unsigned i, dst = 0, nr_compacted = 1;
+
+ if (c)
+ lockdep_assert_held(&c->sb_lock);
+
+ if (!nr) {
+ bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
+ bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal_v2);
+ return 0;
+ }
+
+ for (i = 0; i + 1 < nr; i++)
+ if (buckets[i] + 1 != buckets[i + 1])
+ nr_compacted++;
+
+ j = bch2_sb_field_resize(&ca->disk_sb, journal_v2,
+ (sizeof(*j) + sizeof(j->d[0]) * nr_compacted) / sizeof(u64));
+ if (!j)
+ return -BCH_ERR_ENOSPC_sb_journal;
+
+ bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
+
+ j->d[dst].start = cpu_to_le64(buckets[0]);
+ j->d[dst].nr = cpu_to_le64(1);
+
+ for (i = 1; i < nr; i++) {
+ if (buckets[i] == buckets[i - 1] + 1) {
+ le64_add_cpu(&j->d[dst].nr, 1);
+ } else {
+ dst++;
+ j->d[dst].start = cpu_to_le64(buckets[i]);
+ j->d[dst].nr = cpu_to_le64(1);
+ }
+ }
+
+ BUG_ON(dst + 1 != nr_compacted);
+ return 0;
+}
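+
+/*
+ * E.g. (hypothetical) buckets[] = { 10, 11, 12, 20, 21 } compacts to two
+ * journal_v2 ranges: { .start = 10, .nr = 3 } and { .start = 20, .nr = 2 }.
+ */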
diff --git a/fs/bcachefs/journal_sb.h b/fs/bcachefs/journal_sb.h
new file mode 100644
index 000000000000..ba40a7e8d90a
--- /dev/null
+++ b/fs/bcachefs/journal_sb.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include "super-io.h"
+#include "vstructs.h"
+
+static inline unsigned bch2_nr_journal_buckets(struct bch_sb_field_journal *j)
+{
+ return j
+ ? (__le64 *) vstruct_end(&j->field) - j->buckets
+ : 0;
+}
+
+static inline unsigned bch2_sb_field_journal_v2_nr_entries(struct bch_sb_field_journal_v2 *j)
+{
+ if (!j)
+ return 0;
+
+ return (struct bch_sb_field_journal_v2_entry *) vstruct_end(&j->field) - &j->d[0];
+}
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_journal;
+extern const struct bch_sb_field_ops bch_sb_field_ops_journal_v2;
+
+int bch2_journal_buckets_to_sb(struct bch_fs *, struct bch_dev *, u64 *, unsigned);
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
new file mode 100644
index 000000000000..f9d9aa95bf3a
--- /dev/null
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_iter.h"
+#include "eytzinger.h"
+#include "journal_seq_blacklist.h"
+#include "super-io.h"
+
+/*
+ * journal_seq_blacklist machinery:
+ *
+ * To guarantee order of btree updates after a crash, we need to detect when a
+ * btree node entry (bset) is newer than the newest journal entry that was
+ * successfully written, and ignore it - effectively ignoring any btree updates
+ * that didn't make it into the journal.
+ *
+ * If we didn't do this, we might have two btree nodes, a and b, both with
+ * updates that weren't written to the journal yet: if b was updated after a,
+ * but b was flushed and not a - oops; on recovery we'll find that the updates
+ * to b happened, but not the updates to a that happened before it.
+ *
+ * Ignoring bsets that are newer than the newest journal entry is always safe,
+ * because everything they contain will also have been journalled - and must
+ * still be present in the journal on disk until a journal entry has been
+ * written _after_ that bset was written.
+ *
+ * To accomplish this, bsets record the newest journal sequence number they
+ * contain updates for; then, on startup, the btree code queries the journal
+ * code to ask "Is this sequence number newer than the newest journal entry? If
+ * so, ignore it."
+ *
+ * When this happens, we must blacklist that journal sequence number: the
+ * journal must not write any entries with that sequence number, and it must
+ * record that it was blacklisted so that a) on recovery we don't think we have
+ * missing journal entries and b) so that the btree code continues to ignore
+ * that bset, until that btree node is rewritten.
+ */
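+
+/*
+ * Concretely (hypothetical numbers): if the newest journal entry written
+ * before a crash has seq 95 but a flushed btree node contains a bset stamped
+ * with journal seq 100, recovery ignores that bset and blacklists seqs 96
+ * through 100 - they're never written as journal entries, and never reported
+ * as missing.
+ */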
+
+static unsigned sb_blacklist_u64s(unsigned nr)
+{
+ struct bch_sb_field_journal_seq_blacklist *bl;
+
+ return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
+}
+
+static struct bch_sb_field_journal_seq_blacklist *
+blacklist_entry_try_merge(struct bch_fs *c,
+ struct bch_sb_field_journal_seq_blacklist *bl,
+ unsigned i)
+{
+ unsigned nr = blacklist_nr_entries(bl);
+
+ if (le64_to_cpu(bl->start[i].end) >=
+ le64_to_cpu(bl->start[i + 1].start)) {
+ bl->start[i].end = bl->start[i + 1].end;
+ --nr;
+ memmove(&bl->start[i],
+ &bl->start[i + 1],
+ sizeof(bl->start[0]) * (nr - i));
+
+ bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
+ sb_blacklist_u64s(nr));
+ BUG_ON(!bl);
+ }
+
+ return bl;
+}
+
+static bool bl_entry_contig_or_overlaps(struct journal_seq_blacklist_entry *e,
+ u64 start, u64 end)
+{
+ return !(end < le64_to_cpu(e->start) || le64_to_cpu(e->end) < start);
+}
+
+int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
+{
+ struct bch_sb_field_journal_seq_blacklist *bl;
+ unsigned i, nr;
+ int ret = 0;
+
+ mutex_lock(&c->sb_lock);
+ bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
+ nr = blacklist_nr_entries(bl);
+
+ for (i = 0; i < nr; i++) {
+ struct journal_seq_blacklist_entry *e =
+ bl->start + i;
+
+ if (bl_entry_contig_or_overlaps(e, start, end)) {
+ e->start = cpu_to_le64(min(start, le64_to_cpu(e->start)));
+ e->end = cpu_to_le64(max(end, le64_to_cpu(e->end)));
+
+ if (i + 1 < nr)
+ bl = blacklist_entry_try_merge(c,
+ bl, i);
+ if (i)
+ bl = blacklist_entry_try_merge(c,
+ bl, i - 1);
+ goto out_write_sb;
+ }
+ }
+
+ bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
+ sb_blacklist_u64s(nr + 1));
+ if (!bl) {
+ ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
+ goto out;
+ }
+
+ bl->start[nr].start = cpu_to_le64(start);
+ bl->start[nr].end = cpu_to_le64(end);
+out_write_sb:
+ c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);
+
+ ret = bch2_write_super(c);
+out:
+ mutex_unlock(&c->sb_lock);
+
+ return ret ?: bch2_blacklist_table_initialize(c);
+}
+
+static int journal_seq_blacklist_table_cmp(const void *_l,
+ const void *_r, size_t size)
+{
+ const struct journal_seq_blacklist_table_entry *l = _l;
+ const struct journal_seq_blacklist_table_entry *r = _r;
+
+ return cmp_int(l->start, r->start);
+}
+
+bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
+ bool dirty)
+{
+ struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
+ struct journal_seq_blacklist_table_entry search = { .start = seq };
+ int idx;
+
+ if (!t)
+ return false;
+
+ idx = eytzinger0_find_le(t->entries, t->nr,
+ sizeof(t->entries[0]),
+ journal_seq_blacklist_table_cmp,
+ &search);
+ if (idx < 0)
+ return false;
+
+ BUG_ON(t->entries[idx].start > seq);
+
+ if (seq >= t->entries[idx].end)
+ return false;
+
+ if (dirty)
+ t->entries[idx].dirty = true;
+ return true;
+}
+
+int bch2_blacklist_table_initialize(struct bch_fs *c)
+{
+ struct bch_sb_field_journal_seq_blacklist *bl =
+ bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
+ struct journal_seq_blacklist_table *t;
+ unsigned i, nr = blacklist_nr_entries(bl);
+
+ if (!bl)
+ return 0;
+
+ t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
+ GFP_KERNEL);
+ if (!t)
+ return -BCH_ERR_ENOMEM_blacklist_table_init;
+
+ t->nr = nr;
+
+ for (i = 0; i < nr; i++) {
+ t->entries[i].start = le64_to_cpu(bl->start[i].start);
+ t->entries[i].end = le64_to_cpu(bl->start[i].end);
+ }
+
+ eytzinger0_sort(t->entries,
+ t->nr,
+ sizeof(t->entries[0]),
+ journal_seq_blacklist_table_cmp,
+ NULL);
+
+ kfree(c->journal_seq_blacklist_table);
+ c->journal_seq_blacklist_table = t;
+ return 0;
+}
+
+static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_journal_seq_blacklist *bl =
+ field_to_type(f, journal_seq_blacklist);
+ unsigned i, nr = blacklist_nr_entries(bl);
+
+ for (i = 0; i < nr; i++) {
+ struct journal_seq_blacklist_entry *e = bl->start + i;
+
+ if (le64_to_cpu(e->start) >=
+ le64_to_cpu(e->end)) {
+ prt_printf(err, "entry %u start >= end (%llu >= %llu)",
+ i, le64_to_cpu(e->start), le64_to_cpu(e->end));
+ return -BCH_ERR_invalid_sb_journal_seq_blacklist;
+ }
+
+ if (i + 1 < nr &&
+ le64_to_cpu(e[0].end) >
+ le64_to_cpu(e[1].start)) {
+ prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
+ i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
+ return -BCH_ERR_invalid_sb_journal_seq_blacklist;
+ }
+ }
+
+ return 0;
+}
+
+static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
+ struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_journal_seq_blacklist *bl =
+ field_to_type(f, journal_seq_blacklist);
+ struct journal_seq_blacklist_entry *i;
+ unsigned nr = blacklist_nr_entries(bl);
+
+ for (i = bl->start; i < bl->start + nr; i++) {
+ if (i != bl->start)
+ prt_printf(out, " ");
+
+ prt_printf(out, "%llu-%llu",
+ le64_to_cpu(i->start),
+ le64_to_cpu(i->end));
+ }
+ prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
+ .validate = bch2_sb_journal_seq_blacklist_validate,
+ .to_text = bch2_sb_journal_seq_blacklist_to_text
+};
+
+void bch2_blacklist_entries_gc(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work, struct bch_fs,
+ journal_seq_blacklist_gc_work);
+ struct journal_seq_blacklist_table *t;
+ struct bch_sb_field_journal_seq_blacklist *bl;
+ struct journal_seq_blacklist_entry *src, *dst;
+ struct btree_trans *trans = bch2_trans_get(c);
+ unsigned i, nr, new_nr;
+ int ret;
+
+ for (i = 0; i < BTREE_ID_NR; i++) {
+ struct btree_iter iter;
+ struct btree *b;
+
+ bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
+ 0, 0, BTREE_ITER_PREFETCH);
+retry:
+ bch2_trans_begin(trans);
+
+ b = bch2_btree_iter_peek_node(&iter);
+
+ while (!(ret = PTR_ERR_OR_ZERO(b)) &&
+ b &&
+ !test_bit(BCH_FS_STOPPING, &c->flags))
+ b = bch2_btree_iter_next_node(&iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ bch2_trans_put(trans);
+ if (ret)
+ return;
+
+ mutex_lock(&c->sb_lock);
+ bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
+ if (!bl)
+ goto out;
+
+ nr = blacklist_nr_entries(bl);
+ dst = bl->start;
+
+ t = c->journal_seq_blacklist_table;
+ BUG_ON(nr != t->nr);
+
+ for (src = bl->start, i = eytzinger0_first(t->nr);
+ src < bl->start + nr;
+ src++, i = eytzinger0_next(i, nr)) {
+ BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
+ BUG_ON(t->entries[i].end != le64_to_cpu(src->end));
+
+ if (t->entries[i].dirty)
+ *dst++ = *src;
+ }
+
+ new_nr = dst - bl->start;
+
+ bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);
+
+ if (new_nr != nr) {
+ bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
+ new_nr ? sb_blacklist_u64s(new_nr) : 0);
+ BUG_ON(new_nr && !bl);
+
+ if (!new_nr)
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));
+
+ bch2_write_super(c);
+ }
+out:
+ mutex_unlock(&c->sb_lock);
+}
diff --git a/fs/bcachefs/journal_seq_blacklist.h b/fs/bcachefs/journal_seq_blacklist.h
new file mode 100644
index 000000000000..afb886ec8e25
--- /dev/null
+++ b/fs/bcachefs/journal_seq_blacklist.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
+#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
+
+static inline unsigned
+blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl)
+{
+ return bl
+ ? ((vstruct_end(&bl->field) - (void *) &bl->start[0]) /
+ sizeof(struct journal_seq_blacklist_entry))
+ : 0;
+}
+
+bool bch2_journal_seq_is_blacklisted(struct bch_fs *, u64, bool);
+int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64, u64);
+int bch2_blacklist_table_initialize(struct bch_fs *);
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist;
+
+void bch2_blacklist_entries_gc(struct work_struct *);
+
+#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H */
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
new file mode 100644
index 000000000000..42504e16acb6
--- /dev/null
+++ b/fs/bcachefs/journal_types.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_JOURNAL_TYPES_H
+#define _BCACHEFS_JOURNAL_TYPES_H
+
+#include <linux/cache.h>
+#include <linux/workqueue.h>
+
+#include "alloc_types.h"
+#include "super_types.h"
+#include "fifo.h"
+
+#define JOURNAL_BUF_BITS 2
+#define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS)
+#define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1)
+
+/*
+ * We put JOURNAL_BUF_NR of these in struct journal; we use them for writes to
+ * the journal that are being staged or in flight.
+ */
+struct journal_buf {
+ struct jset *data;
+
+ __BKEY_PADDED(key, BCH_REPLICAS_MAX);
+ struct bch_devs_list devs_written;
+
+ struct closure_waitlist wait;
+ u64 last_seq; /* copy of data->last_seq */
+ long expires;
+ u64 flush_time;
+
+ unsigned buf_size; /* size in bytes of @data */
+ unsigned sectors; /* maximum size for current entry */
+ unsigned disk_sectors; /* maximum size entry could have been, if
+ buf_size was bigger */
+ unsigned u64s_reserved;
+ bool noflush; /* write has already been kicked off, and was noflush */
+ bool must_flush; /* something wants a flush */
+ bool separate_flush;
+};
+
+/*
+ * Something that makes a journal entry dirty - i.e. a btree node that has to be
+ * flushed:
+ */
+
+enum journal_pin_type {
+ JOURNAL_PIN_btree,
+ JOURNAL_PIN_key_cache,
+ JOURNAL_PIN_other,
+ JOURNAL_PIN_NR,
+};
+
+struct journal_entry_pin_list {
+ struct list_head list[JOURNAL_PIN_NR];
+ struct list_head flushed;
+ atomic_t count;
+ struct bch_devs_list devs;
+};
+
+struct journal;
+struct journal_entry_pin;
+typedef int (*journal_pin_flush_fn)(struct journal *j,
+ struct journal_entry_pin *, u64);
+
+struct journal_entry_pin {
+ struct list_head list;
+ journal_pin_flush_fn flush;
+ u64 seq;
+};
+
+struct journal_res {
+ bool ref;
+ u8 idx;
+ u16 u64s;
+ u32 offset;
+ u64 seq;
+};
+
+/*
+ * For reserving space in the journal prior to getting a reservation on a
+ * particular journal entry:
+ */
+struct journal_preres {
+ unsigned u64s;
+};
+
+union journal_res_state {
+ struct {
+ atomic64_t counter;
+ };
+
+ struct {
+ u64 v;
+ };
+
+ struct {
+ u64 cur_entry_offset:20,
+ idx:2,
+ unwritten_idx:2,
+ buf0_count:10,
+ buf1_count:10,
+ buf2_count:10,
+ buf3_count:10;
+ };
+};
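+
+/*
+ * Note that the bitfields above pack into exactly 64 bits
+ * (20 + 2 + 2 + 4 * 10), so the whole reservation state fits in the atomic64
+ * and can be updated with a single cmpxchg.
+ */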
+
+union journal_preres_state {
+ struct {
+ atomic64_t counter;
+ };
+
+ struct {
+ u64 v;
+ };
+
+ struct {
+ u64 waiting:1,
+ reserved:31,
+ remaining:32;
+ };
+};
+
+/* bytes: */
+#define JOURNAL_ENTRY_SIZE_MIN (64U << 10) /* 64k */
+#define JOURNAL_ENTRY_SIZE_MAX (4U << 20) /* 4M */
+
+/*
+ * We stash some journal state as sentinel values in cur_entry_offset:
+ * note - cur_entry_offset is in units of u64s
+ */
+#define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1)
+
+#define JOURNAL_ENTRY_CLOSED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 1)
+#define JOURNAL_ENTRY_ERROR_VAL (JOURNAL_ENTRY_OFFSET_MAX)
+
+struct journal_space {
+	/* Units of 512-byte sectors: */
+ unsigned next_entry; /* How big the next journal entry can be */
+ unsigned total;
+};
+
+enum journal_space_from {
+ journal_space_discarded,
+ journal_space_clean_ondisk,
+ journal_space_clean,
+ journal_space_total,
+ journal_space_nr,
+};
+
+enum journal_flags {
+ JOURNAL_REPLAY_DONE,
+ JOURNAL_STARTED,
+ JOURNAL_MAY_SKIP_FLUSH,
+ JOURNAL_NEED_FLUSH_WRITE,
+};
+
+/* Reasons we may fail to get a journal reservation: */
+#define JOURNAL_ERRORS() \
+ x(ok) \
+ x(blocked) \
+ x(max_in_flight) \
+ x(journal_full) \
+ x(journal_pin_full) \
+ x(journal_stuck) \
+ x(insufficient_devices)
+
+enum journal_errors {
+#define x(n) JOURNAL_ERR_##n,
+ JOURNAL_ERRORS()
+#undef x
+};
+
+typedef DARRAY(u64) darray_u64;
+
+/* Embedded in struct bch_fs */
+struct journal {
+ /* Fastpath stuff up front: */
+ struct {
+
+ union journal_res_state reservations;
+ enum bch_watermark watermark;
+
+ union journal_preres_state prereserved;
+
+ } __aligned(SMP_CACHE_BYTES);
+
+ unsigned long flags;
+
+ /* Max size of current journal entry */
+ unsigned cur_entry_u64s;
+ unsigned cur_entry_sectors;
+
+ /* Reserved space in journal entry to be used just prior to write */
+ unsigned entry_u64s_reserved;
+
+
+ /*
+ * 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
+ * insufficient devices:
+ */
+ enum journal_errors cur_entry_error;
+
+ unsigned buf_size_want;
+ /*
+ * We may queue up some things to be journalled (log messages) before
+ * the journal has actually started - stash them here:
+ */
+ darray_u64 early_journal_entries;
+
+ /*
+	 * JOURNAL_BUF_NR journal entry buffers -- one is currently open for new
+	 * entries, the others may be in flight, being written out.
+ */
+ struct journal_buf buf[JOURNAL_BUF_NR];
+
+ spinlock_t lock;
+
+ /* if nonzero, we may not open a new journal entry: */
+ unsigned blocked;
+
+ /* Used when waiting because the journal was full */
+ wait_queue_head_t wait;
+ struct closure_waitlist async_wait;
+ struct closure_waitlist preres_wait;
+
+ struct closure io;
+ struct delayed_work write_work;
+
+ /* Sequence number of most recent journal entry (last entry in @pin) */
+ atomic64_t seq;
+
+ /* seq, last_seq from the most recent journal entry successfully written */
+ u64 seq_ondisk;
+ u64 flushed_seq_ondisk;
+ u64 last_seq_ondisk;
+ u64 err_seq;
+ u64 last_empty_seq;
+
+ /*
+ * FIFO of journal entries whose btree updates have not yet been
+ * written out.
+ *
+ * Each entry is a reference count. The position in the FIFO is the
+ * entry's sequence number relative to @seq.
+ *
+ * The journal entry itself holds a reference count, put when the
+ * journal entry is written out. Each btree node modified by the journal
+ * entry also holds a reference count, put when the btree node is
+ * written.
+ *
+ * When a reference count reaches zero, the journal entry is no longer
+ * needed. When all journal entries in the oldest journal bucket are no
+ * longer needed, the bucket can be discarded and reused.
+ */
+ struct {
+ u64 front, back, size, mask;
+ struct journal_entry_pin_list *data;
+ } pin;
+
+ struct journal_space space[journal_space_nr];
+
+ u64 replay_journal_seq;
+ u64 replay_journal_seq_end;
+
+ struct write_point wp;
+ spinlock_t err_lock;
+
+ struct mutex reclaim_lock;
+ /*
+ * Used for waiting until journal reclaim has freed up space in the
+ * journal:
+ */
+ wait_queue_head_t reclaim_wait;
+ struct task_struct *reclaim_thread;
+ bool reclaim_kicked;
+ unsigned long next_reclaim;
+ u64 nr_direct_reclaim;
+ u64 nr_background_reclaim;
+
+ unsigned long last_flushed;
+ struct journal_entry_pin *flush_in_progress;
+ bool flush_in_progress_dropped;
+ wait_queue_head_t pin_flush_wait;
+
+ /* protects advancing ja->discard_idx: */
+ struct mutex discard_lock;
+ bool can_discard;
+
+ unsigned long last_flush_write;
+
+ u64 res_get_blocked_start;
+ u64 write_start_time;
+
+ u64 nr_flush_writes;
+ u64 nr_noflush_writes;
+
+ struct bch2_time_stats *flush_write_time;
+ struct bch2_time_stats *noflush_write_time;
+ struct bch2_time_stats *blocked_time;
+ struct bch2_time_stats *flush_seq_time;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map res_map;
+#endif
+} __aligned(SMP_CACHE_BYTES);
+
+/*
+ * Embedded in struct bch_dev. First three fields refer to the array of journal
+ * buckets, in bch_sb.
+ */
+struct journal_device {
+ /*
+ * For each journal bucket, contains the max sequence number of the
+ * journal writes it contains - so we know when a bucket can be reused.
+ */
+ u64 *bucket_seq;
+
+ unsigned sectors_free;
+
+ /*
+ * discard_idx <= dirty_idx_ondisk <= dirty_idx <= cur_idx:
+ */
+ unsigned discard_idx; /* Next bucket to discard */
+ unsigned dirty_idx_ondisk;
+ unsigned dirty_idx;
+ unsigned cur_idx; /* Journal bucket we're currently writing to */
+ unsigned nr;
+
+ u64 *buckets;
+
+ /* Bio for journal reads/writes to this device */
+ struct bio *bio;
+
+ /* for bch_journal_read_device */
+ struct closure read;
+};
+
+/*
+ * journal_entry_res - reserve space in every journal entry:
+ */
+struct journal_entry_res {
+ unsigned u64s;
+};
+
+#endif /* _BCACHEFS_JOURNAL_TYPES_H */
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
new file mode 100644
index 000000000000..5699cd4873c8
--- /dev/null
+++ b/fs/bcachefs/keylist.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey.h"
+#include "keylist.h"
+
+int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s,
+ size_t nr_inline_u64s, size_t new_u64s)
+{
+ size_t oldsize = bch2_keylist_u64s(l);
+ size_t newsize = oldsize + new_u64s;
+ u64 *old_buf = l->keys_p == inline_u64s ? NULL : l->keys_p;
+ u64 *new_keys;
+
+ newsize = roundup_pow_of_two(newsize);
+
+ if (newsize <= nr_inline_u64s ||
+ (old_buf && roundup_pow_of_two(oldsize) == newsize))
+ return 0;
+
+ new_keys = krealloc(old_buf, sizeof(u64) * newsize, GFP_NOFS);
+ if (!new_keys)
+ return -ENOMEM;
+
+ if (!old_buf)
+ memcpy_u64s(new_keys, inline_u64s, oldsize);
+
+ l->keys_p = new_keys;
+ l->top_p = new_keys + oldsize;
+
+ return 0;
+}
+
+void bch2_keylist_pop_front(struct keylist *l)
+{
+ l->top_p -= bch2_keylist_front(l)->k.u64s;
+
+ memmove_u64s_down(l->keys,
+ bkey_next(l->keys),
+ bch2_keylist_u64s(l));
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_verify_keylist_sorted(struct keylist *l)
+{
+ struct bkey_i *k;
+
+ for_each_keylist_key(l, k)
+ BUG_ON(bkey_next(k) != l->top &&
+ bpos_ge(k->k.p, bkey_next(k)->k.p));
+}
+#endif
diff --git a/fs/bcachefs/keylist.h b/fs/bcachefs/keylist.h
new file mode 100644
index 000000000000..fe759c7031e0
--- /dev/null
+++ b/fs/bcachefs/keylist.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_KEYLIST_H
+#define _BCACHEFS_KEYLIST_H
+
+#include "keylist_types.h"
+
+int bch2_keylist_realloc(struct keylist *, u64 *, size_t, size_t);
+void bch2_keylist_pop_front(struct keylist *);
+
+static inline void bch2_keylist_init(struct keylist *l, u64 *inline_keys)
+{
+ l->top_p = l->keys_p = inline_keys;
+}
+
+static inline void bch2_keylist_free(struct keylist *l, u64 *inline_keys)
+{
+ if (l->keys_p != inline_keys)
+ kfree(l->keys_p);
+}
+
+static inline void bch2_keylist_push(struct keylist *l)
+{
+ l->top = bkey_next(l->top);
+}
+
+static inline void bch2_keylist_add(struct keylist *l, const struct bkey_i *k)
+{
+ bkey_copy(l->top, k);
+ bch2_keylist_push(l);
+}
+
+static inline bool bch2_keylist_empty(struct keylist *l)
+{
+ return l->top == l->keys;
+}
+
+static inline size_t bch2_keylist_u64s(struct keylist *l)
+{
+ return l->top_p - l->keys_p;
+}
+
+static inline size_t bch2_keylist_bytes(struct keylist *l)
+{
+ return bch2_keylist_u64s(l) * sizeof(u64);
+}
+
+static inline struct bkey_i *bch2_keylist_front(struct keylist *l)
+{
+ return l->keys;
+}
+
+#define for_each_keylist_key(_keylist, _k) \
+ for (_k = (_keylist)->keys; \
+ _k != (_keylist)->top; \
+ _k = bkey_next(_k))
+
+static inline u64 keylist_sectors(struct keylist *keys)
+{
+ struct bkey_i *k;
+ u64 ret = 0;
+
+ for_each_keylist_key(keys, k)
+ ret += k->k.size;
+
+ return ret;
+}
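+
+/*
+ * Typical usage (sketch; k being some bkey_i to queue): start with a small
+ * inline buffer on the stack and spill to the heap only if it fills up:
+ *
+ *	u64 inline_keys[BKEY_U64s * 4];
+ *	struct keylist keys;
+ *
+ *	bch2_keylist_init(&keys, inline_keys);
+ *	if (!bch2_keylist_realloc(&keys, inline_keys,
+ *				  ARRAY_SIZE(inline_keys), k->k.u64s))
+ *		bch2_keylist_add(&keys, k);
+ *	...
+ *	while (!bch2_keylist_empty(&keys))
+ *		bch2_keylist_pop_front(&keys);
+ *	bch2_keylist_free(&keys, inline_keys);
+ */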
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_verify_keylist_sorted(struct keylist *);
+#else
+static inline void bch2_verify_keylist_sorted(struct keylist *l) {}
+#endif
+
+#endif /* _BCACHEFS_KEYLIST_H */
diff --git a/fs/bcachefs/keylist_types.h b/fs/bcachefs/keylist_types.h
new file mode 100644
index 000000000000..4b3ff7d8a875
--- /dev/null
+++ b/fs/bcachefs/keylist_types.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_KEYLIST_TYPES_H
+#define _BCACHEFS_KEYLIST_TYPES_H
+
+struct keylist {
+ union {
+ struct bkey_i *keys;
+ u64 *keys_p;
+ };
+ union {
+ struct bkey_i *top;
+ u64 *top_p;
+ };
+};
+
+#endif /* _BCACHEFS_KEYLIST_TYPES_H */
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
new file mode 100644
index 000000000000..8640f7dee0de
--- /dev/null
+++ b/fs/bcachefs/logged_ops.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "error.h"
+#include "io_misc.h"
+#include "logged_ops.h"
+#include "super.h"
+
+struct bch_logged_op_fn {
+ u8 type;
+ int (*resume)(struct btree_trans *, struct bkey_i *);
+};
+
+static const struct bch_logged_op_fn logged_op_fns[] = {
+#define x(n) { \
+ .type = KEY_TYPE_logged_op_##n, \
+ .resume = bch2_resume_logged_op_##n, \
+},
+ BCH_LOGGED_OPS()
+#undef x
+};
+
+static const struct bch_logged_op_fn *logged_op_fn(enum bch_bkey_type type)
+{
+ for (unsigned i = 0; i < ARRAY_SIZE(logged_op_fns); i++)
+ if (logged_op_fns[i].type == type)
+ return logged_op_fns + i;
+ return NULL;
+}
+
+static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
+ struct bkey_buf sk;
+ u32 restart_count = trans->restart_count;
+ int ret;
+
+ if (!fn)
+ return 0;
+
+ bch2_bkey_buf_init(&sk);
+ bch2_bkey_buf_reassemble(&sk, c, k);
+
+ ret = drop_locks_do(trans, (bch2_fs_lazy_rw(c), 0)) ?:
+ fn->resume(trans, sk.k) ?: trans_was_restarted(trans, restart_count);
+
+ bch2_bkey_buf_exit(&sk, c);
+ return ret;
+}
+
+int bch2_resume_logged_ops(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key2(trans, iter,
+ BTREE_ID_logged_ops, POS_MIN, BTREE_ITER_PREFETCH, k,
+ resume_logged_op(trans, &iter, k)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k)
+{
+ struct btree_iter iter;
+ int ret;
+
+ ret = bch2_bkey_get_empty_slot(trans, &iter, BTREE_ID_logged_ops, POS_MAX);
+ if (ret)
+ return ret;
+
+ k->k.p = iter.pos;
+
+ ret = bch2_trans_update(trans, &iter, k, 0);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k)
+{
+ return commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ __bch2_logged_op_start(trans, k));
+}
+
+void bch2_logged_op_finish(struct btree_trans *trans, struct bkey_i *k)
+{
+ int ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_btree_delete(trans, BTREE_ID_logged_ops, k->k.p, 0));
+ /*
+ * This needs to be a fatal error because we've left an unfinished
+ * operation in the logged ops btree.
+ *
+ * We should only ever see an error here if the filesystem has already
+ * been shut down, but make sure of that here:
+ */
+ if (ret) {
+ struct bch_fs *c = trans->c;
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+ bch2_fs_fatal_error(c, "%s: error deleting logged operation %s: %s",
+ __func__, buf.buf, bch2_err_str(ret));
+ printbuf_exit(&buf);
+ }
+}
diff --git a/fs/bcachefs/logged_ops.h b/fs/bcachefs/logged_ops.h
new file mode 100644
index 000000000000..4d1e786a27a8
--- /dev/null
+++ b/fs/bcachefs/logged_ops.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_LOGGED_OPS_H
+#define _BCACHEFS_LOGGED_OPS_H
+
+#include "bkey.h"
+
+#define BCH_LOGGED_OPS() \
+ x(truncate) \
+ x(finsert)
+
+static inline int bch2_logged_op_update(struct btree_trans *trans, struct bkey_i *op)
+{
+ return bch2_btree_insert_nonextent(trans, BTREE_ID_logged_ops, op, 0);
+}
+
+int bch2_resume_logged_ops(struct bch_fs *);
+int bch2_logged_op_start(struct btree_trans *, struct bkey_i *);
+void bch2_logged_op_finish(struct btree_trans *, struct bkey_i *);
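+
+/*
+ * Expected usage, as a sketch: an operation that must survive a crash calls
+ * bch2_logged_op_start() to persist its state, bch2_logged_op_update() as
+ * that state changes, and bch2_logged_op_finish() on completion; after an
+ * unclean shutdown, bch2_resume_logged_ops() replays whatever was left via
+ * the matching bch2_resume_logged_op_*() hook.
+ */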
+
+#endif /* _BCACHEFS_LOGGED_OPS_H */
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
new file mode 100644
index 000000000000..215a653322f3
--- /dev/null
+++ b/fs/bcachefs/lru.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
+#include "error.h"
+#include "lru.h"
+#include "recovery.h"
+
+/* KEY_TYPE_lru is obsolete: */
+int bch2_lru_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (!lru_pos_time(k.k->p)) {
+ prt_printf(err, "lru entry at time=0");
+ return -BCH_ERR_invalid_bkey;
+	}
+
+ return 0;
+}
+
+void bch2_lru_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ const struct bch_lru *lru = bkey_s_c_to_lru(k).v;
+
+ prt_printf(out, "idx %llu", le64_to_cpu(lru->idx));
+}
+
+void bch2_lru_pos_to_text(struct printbuf *out, struct bpos lru)
+{
+ prt_printf(out, "%llu:%llu -> %llu:%llu",
+ lru_pos_id(lru),
+ lru_pos_time(lru),
+ u64_to_bucket(lru.offset).inode,
+ u64_to_bucket(lru.offset).offset);
+}
+
+static int __bch2_lru_set(struct btree_trans *trans, u16 lru_id,
+ u64 dev_bucket, u64 time, bool set)
+{
+ return time
+ ? bch2_btree_bit_mod(trans, BTREE_ID_lru,
+ lru_pos(lru_id, dev_bucket, time), set)
+ : 0;
+}
+
+int bch2_lru_del(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
+{
+ return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_deleted);
+}
+
+int bch2_lru_set(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
+{
+ return __bch2_lru_set(trans, lru_id, dev_bucket, time, KEY_TYPE_set);
+}
+
+int bch2_lru_change(struct btree_trans *trans,
+ u16 lru_id, u64 dev_bucket,
+ u64 old_time, u64 new_time)
+{
+ if (old_time == new_time)
+ return 0;
+
+ return bch2_lru_del(trans, lru_id, dev_bucket, old_time) ?:
+ bch2_lru_set(trans, lru_id, dev_bucket, new_time);
+}
+
+static const char * const bch2_lru_types[] = {
+#define x(n) #n,
+ BCH_LRU_TYPES()
+#undef x
+ NULL
+};
+
+static int bch2_check_lru_key(struct btree_trans *trans,
+ struct btree_iter *lru_iter,
+ struct bkey_s_c lru_k,
+ struct bpos *last_flushed_pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ enum bch_lru_type type = lru_type(lru_k);
+ struct bpos alloc_pos = u64_to_bucket(lru_k.k->p.offset);
+ u64 idx;
+ int ret;
+
+ if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_pos), c,
+ "lru key points to nonexistent device:bucket %llu:%llu",
+ alloc_pos.inode, alloc_pos.offset))
+ return bch2_btree_delete_at(trans, lru_iter, 0);
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, alloc_pos, 0);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ a = bch2_alloc_to_v4(k, &a_convert);
+
+ switch (type) {
+ case BCH_LRU_read:
+ idx = alloc_lru_idx_read(*a);
+ break;
+ case BCH_LRU_fragmentation:
+ idx = a->fragmentation_lru;
+ break;
+ }
+
+ if (lru_k.k->type != KEY_TYPE_set ||
+ lru_pos_time(lru_k.k->p) != idx) {
+ if (!bpos_eq(*last_flushed_pos, lru_k.k->p)) {
+ *last_flushed_pos = lru_k.k->p;
+ ret = bch2_btree_write_buffer_flush_sync(trans) ?:
+ -BCH_ERR_transaction_restart_write_buffer_flush;
+ goto out;
+ }
+
+ if (c->opts.reconstruct_alloc ||
+ fsck_err(c, "incorrect lru entry: lru %s time %llu\n"
+ " %s\n"
+ " for %s",
+ bch2_lru_types[type],
+ lru_pos_time(lru_k.k->p),
+ (bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
+ (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf)))
+ ret = bch2_btree_delete_at(trans, lru_iter, 0);
+ }
+out:
+err:
+fsck_err:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+int bch2_check_lrus(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bpos last_flushed_pos = POS_MIN;
+ int ret = 0;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ bch2_check_lru_key(trans, &iter, k, &last_flushed_pos)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
new file mode 100644
index 000000000000..be66bf9ad809
--- /dev/null
+++ b/fs/bcachefs/lru.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_LRU_H
+#define _BCACHEFS_LRU_H
+
+#define LRU_TIME_BITS 48
+#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)
+
+static inline u64 lru_pos_id(struct bpos pos)
+{
+ return pos.inode >> LRU_TIME_BITS;
+}
+
+static inline u64 lru_pos_time(struct bpos pos)
+{
+ return pos.inode & ~(~0ULL << LRU_TIME_BITS);
+}
+
+static inline struct bpos lru_pos(u16 lru_id, u64 dev_bucket, u64 time)
+{
+ struct bpos pos = POS(((u64) lru_id << LRU_TIME_BITS)|time, dev_bucket);
+
+ EBUG_ON(time > LRU_TIME_MAX);
+ EBUG_ON(lru_pos_id(pos) != lru_id);
+ EBUG_ON(lru_pos_time(pos) != time);
+ EBUG_ON(pos.offset != dev_bucket);
+
+ return pos;
+}
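+
+/*
+ * For example (purely illustrative): lru_pos(1, 2, 3) yields
+ * POS(((u64) 1 << 48) | 3, 2), i.e. the LRU id in the top 16 bits of the
+ * inode field, the time in the low 48 bits, and the device:bucket in the
+ * offset field.
+ */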
+
+#define BCH_LRU_TYPES() \
+ x(read) \
+ x(fragmentation)
+
+enum bch_lru_type {
+#define x(n) BCH_LRU_##n,
+ BCH_LRU_TYPES()
+#undef x
+};
+
+#define BCH_LRU_FRAGMENTATION_START ((1U << 16) - 1)
+
+static inline enum bch_lru_type lru_type(struct bkey_s_c l)
+{
+	u16 lru_id = l.k->p.inode >> LRU_TIME_BITS;
+
+ if (lru_id == BCH_LRU_FRAGMENTATION_START)
+ return BCH_LRU_fragmentation;
+ return BCH_LRU_read;
+}
+
+int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+void bch2_lru_pos_to_text(struct printbuf *, struct bpos);
+
+#define bch2_bkey_ops_lru ((struct bkey_ops) { \
+ .key_invalid = bch2_lru_invalid, \
+ .val_to_text = bch2_lru_to_text, \
+ .min_val_size = 8, \
+})
+
+int bch2_lru_del(struct btree_trans *, u16, u64, u64);
+int bch2_lru_set(struct btree_trans *, u16, u64, u64);
+int bch2_lru_change(struct btree_trans *, u16, u64, u64, u64);
+
+int bch2_check_lrus(struct bch_fs *);
+
+#endif /* _BCACHEFS_LRU_H */
diff --git a/fs/bcachefs/mean_and_variance.c b/fs/bcachefs/mean_and_variance.c
new file mode 100644
index 000000000000..1f0801e2e565
--- /dev/null
+++ b/fs/bcachefs/mean_and_variance.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for incremental mean and variance.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * Copyright © 2022 Daniel B. Hill
+ *
+ * Author: Daniel B. Hill <daniel@gluo.nz>
+ *
+ * Description:
+ *
+ * This includes some incremental algorithms for mean and variance calculation.
+ *
+ * Derived from the paper: https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
+ *
+ * Create a struct and, if it's the weighted variant, set the weight field
+ * (weight = 2^k).
+ *
+ * Use mean_and_variance[_weighted]_update() on the struct to update its state.
+ *
+ * Use the mean_and_variance[_weighted]_get_* functions to calculate the mean
+ * and variance; some computation is deferred to these functions for
+ * performance reasons.
+ *
+ * See mean_and_variance_test.c for examples of usage.
+ *
+ * DO NOT access the mean and variance fields of the weighted variants directly.
+ * DO NOT change the weight after calling update.
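+ *
+ * A minimal usage sketch (mean_and_variance_test.c has tested examples):
+ *
+ *	struct mean_and_variance mv = {};
+ *
+ *	mean_and_variance_update(&mv, 2);
+ *	mean_and_variance_update(&mv, 4);
+ *
+ *	mean_and_variance_get_mean(mv)		-> 3
+ *	mean_and_variance_get_variance(mv)	-> 1
+ *	mean_and_variance_get_stddev(mv)	-> 1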
+ */
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+
+#include "mean_and_variance.h"
+
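+/*
+ * Long division on a 128-bit numerator: divide 32 bits at a time, folding
+ * each step's remainder into the next chunk (the three div64_u64_rem()
+ * calls below).
+ */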
+u128_u u128_div(u128_u n, u64 d)
+{
+ u128_u r;
+ u64 rem;
+ u64 hi = u128_hi(n);
+ u64 lo = u128_lo(n);
+ u64 h = hi & ((u64) U32_MAX << 32);
+ u64 l = (hi & (u64) U32_MAX) << 32;
+
+ r = u128_shl(u64_to_u128(div64_u64_rem(h, d, &rem)), 64);
+ r = u128_add(r, u128_shl(u64_to_u128(div64_u64_rem(l + (rem << 32), d, &rem)), 32));
+ r = u128_add(r, u64_to_u128(div64_u64_rem(lo + (rem << 32), d, &rem)));
+ return r;
+}
+EXPORT_SYMBOL_GPL(u128_div);
+
+/**
+ * mean_and_variance_get_mean() - get mean from @s
+ */
+s64 mean_and_variance_get_mean(struct mean_and_variance s)
+{
+ return s.n ? div64_u64(s.sum, s.n) : 0;
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_get_mean);
+
+/**
+ * mean_and_variance_get_variance() - get variance from @s1
+ *
+ * See linked pdf equation 12.
+ */
+u64 mean_and_variance_get_variance(struct mean_and_variance s1)
+{
+ if (s1.n) {
+ u128_u s2 = u128_div(s1.sum_squares, s1.n);
+ u64 s3 = abs(mean_and_variance_get_mean(s1));
+
+ return u128_lo(u128_sub(s2, u128_square(s3)));
+ } else {
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_get_variance);
+
+/**
+ * mean_and_variance_get_stddev() - get standard deviation from @s
+ */
+u32 mean_and_variance_get_stddev(struct mean_and_variance s)
+{
+ return int_sqrt64(mean_and_variance_get_variance(s));
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_get_stddev);
+
+/**
+ * mean_and_variance_weighted_update() - exponentially weighted variant of mean_and_variance_update()
+ * @s: state to update
+ * @x: new sample
+ *
+ * See linked pdf: function derived from equations 140-143 where alpha = 2^w.
+ * Values are stored bitshifted for performance and added precision.
+ */
+void mean_and_variance_weighted_update(struct mean_and_variance_weighted *s, s64 x)
+{
+ // previous weighted variance.
+ u8 w = s->weight;
+ u64 var_w0 = s->variance;
+ // new value weighted.
+ s64 x_w = x << w;
+ s64 diff_w = x_w - s->mean;
+ s64 diff = fast_divpow2(diff_w, w);
+ // new mean weighted.
+ s64 u_w1 = s->mean + diff;
+
+ if (!s->init) {
+ s->mean = x_w;
+ s->variance = 0;
+ } else {
+ s->mean = u_w1;
+ s->variance = ((var_w0 << w) - var_w0 + ((diff_w * (x_w - u_w1)) >> w)) >> w;
+ }
+ s->init = true;
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_weighted_update);
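+
+/*
+ * Worked example with weight w = 2, matching mean_and_variance_weighted_test():
+ * updating with 10 and then 20 stores the bitshifted mean 40, then 50, and the
+ * bitshifted variance 0, then 75 - so the getters below report mean
+ * 50 >> 2 = 12 and variance 75 >> 2 = 18.
+ */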
+
+/**
+ * mean_and_variance_weighted_get_mean() - get mean from @s
+ */
+s64 mean_and_variance_weighted_get_mean(struct mean_and_variance_weighted s)
+{
+ return fast_divpow2(s.mean, s.weight);
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_mean);
+
+/**
+ * mean_and_variance_weighted_get_variance() - get variance from @s
+ */
+u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s)
+{
+	// always positive; don't need fast_divpow2
+ return s.variance >> s.weight;
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_variance);
+
+/**
+ * mean_and_variance_weighted_get_stddev() - get standard deviation from @s
+ */
+u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s)
+{
+ return int_sqrt64(mean_and_variance_weighted_get_variance(s));
+}
+EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_stddev);
+
+MODULE_AUTHOR("Daniel B. Hill");
+MODULE_LICENSE("GPL");
diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h
new file mode 100644
index 000000000000..647505010b39
--- /dev/null
+++ b/fs/bcachefs/mean_and_variance.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef MEAN_AND_VARIANCE_H_
+#define MEAN_AND_VARIANCE_H_
+
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
+
+#define SQRT_U64_MAX 4294967295ULL
+
+/*
+ * u128_u: u128 user mode, because not all architectures support a real int128
+ * type
+ */
+
+#ifdef __SIZEOF_INT128__
+
+typedef struct {
+ unsigned __int128 v;
+} __aligned(16) u128_u;
+
+static inline u128_u u64_to_u128(u64 a)
+{
+ return (u128_u) { .v = a };
+}
+
+static inline u64 u128_lo(u128_u a)
+{
+ return a.v;
+}
+
+static inline u64 u128_hi(u128_u a)
+{
+ return a.v >> 64;
+}
+
+static inline u128_u u128_add(u128_u a, u128_u b)
+{
+ a.v += b.v;
+ return a;
+}
+
+static inline u128_u u128_sub(u128_u a, u128_u b)
+{
+ a.v -= b.v;
+ return a;
+}
+
+static inline u128_u u128_shl(u128_u a, s8 shift)
+{
+ a.v <<= shift;
+ return a;
+}
+
+static inline u128_u u128_square(u64 a)
+{
+ u128_u b = u64_to_u128(a);
+
+ b.v *= b.v;
+ return b;
+}
+
+#else
+
+typedef struct {
+ u64 hi, lo;
+} __aligned(16) u128_u;
+
+/* conversions */
+
+static inline u128_u u64_to_u128(u64 a)
+{
+ return (u128_u) { .lo = a };
+}
+
+static inline u64 u128_lo(u128_u a)
+{
+ return a.lo;
+}
+
+static inline u64 u128_hi(u128_u a)
+{
+ return a.hi;
+}
+
+/* arithmetic */
+
+static inline u128_u u128_add(u128_u a, u128_u b)
+{
+ u128_u c;
+
+ c.lo = a.lo + b.lo;
+ c.hi = a.hi + b.hi + (c.lo < a.lo);
+ return c;
+}
+
+static inline u128_u u128_sub(u128_u a, u128_u b)
+{
+ u128_u c;
+
+ c.lo = a.lo - b.lo;
+ c.hi = a.hi - b.hi - (c.lo > a.lo);
+ return c;
+}
+
+static inline u128_u u128_shl(u128_u i, s8 shift)
+{
+ u128_u r;
+
+ r.lo = i.lo << shift;
+ if (shift < 64)
+ r.hi = (i.hi << shift) | (i.lo >> (64 - shift));
+ else {
+ r.hi = i.lo << (shift - 64);
+ r.lo = 0;
+ }
+ return r;
+}
+
+static inline u128_u u128_square(u64 i)
+{
+ u128_u r;
+ u64 h = i >> 32, l = i & U32_MAX;
+
+ r = u128_shl(u64_to_u128(h*h), 64);
+ r = u128_add(r, u128_shl(u64_to_u128(h*l), 32));
+ r = u128_add(r, u128_shl(u64_to_u128(l*h), 32));
+ r = u128_add(r, u64_to_u128(l*l));
+ return r;
+}
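+
+/*
+ * The identity used above, writing i = (h << 32) + l:
+ *
+ *	i^2 = (h*h << 64) + (h*l << 32) + (l*h << 32) + l*l
+ *
+ * Each partial product fits in 64 bits, since h and l are at most 32 bits.
+ */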
+
+#endif
+
+static inline u128_u u64s_to_u128(u64 hi, u64 lo)
+{
+ u128_u c = u64_to_u128(hi);
+
+ c = u128_shl(c, 64);
+ c = u128_add(c, u64_to_u128(lo));
+ return c;
+}
+
+u128_u u128_div(u128_u n, u64 d);
+
+struct mean_and_variance {
+ s64 n;
+ s64 sum;
+ u128_u sum_squares;
+};
+
+/* exponentially weighted variant */
+struct mean_and_variance_weighted {
+ bool init;
+	u8 weight; /* base 2 logarithm */
+ s64 mean;
+ u64 variance;
+};
+
+/**
+ * fast_divpow2() - fast approximation for n / (1 << d)
+ * @n: numerator
+ * @d: the power of 2 denominator.
+ *
+ * note: this rounds towards 0.
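+ *
+ * Example: fast_divpow2(-7, 1) == -3 (like C's -7 / 2), whereas a plain
+ * arithmetic shift would give -7 >> 1 == -4.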
+ */
+static inline s64 fast_divpow2(s64 n, u8 d)
+{
+ return (n + ((n < 0) ? ((1 << d) - 1) : 0)) >> d;
+}
+
+/**
+ * mean_and_variance_update() - update a mean_and_variance struct @s with a new
+ * sample @v
+ * @s: the mean_and_variance to update.
+ * @v: the new sample.
+ *
+ * See linked pdf equation 12.
+ */
+static inline void
+mean_and_variance_update(struct mean_and_variance *s, s64 v)
+{
+ s->n++;
+ s->sum += v;
+ s->sum_squares = u128_add(s->sum_squares, u128_square(abs(v)));
+}
+
+s64 mean_and_variance_get_mean(struct mean_and_variance s);
+u64 mean_and_variance_get_variance(struct mean_and_variance s1);
+u32 mean_and_variance_get_stddev(struct mean_and_variance s);
+
+void mean_and_variance_weighted_update(struct mean_and_variance_weighted *s, s64 v);
+
+s64 mean_and_variance_weighted_get_mean(struct mean_and_variance_weighted s);
+u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s);
+u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s);
+
+#endif /* MEAN_AND_VARIANCE_H_ */
diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c
new file mode 100644
index 000000000000..019583c3ca0e
--- /dev/null
+++ b/fs/bcachefs/mean_and_variance_test.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+
+#include "mean_and_variance.h"
+
+#define MAX_SQR (SQRT_U64_MAX*SQRT_U64_MAX)
+
+static void mean_and_variance_basic_test(struct kunit *test)
+{
+ struct mean_and_variance s = {};
+
+ mean_and_variance_update(&s, 2);
+ mean_and_variance_update(&s, 2);
+
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(s), 2);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_variance(s), 0);
+ KUNIT_EXPECT_EQ(test, s.n, 2);
+
+ mean_and_variance_update(&s, 4);
+ mean_and_variance_update(&s, 4);
+
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(s), 3);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_variance(s), 1);
+ KUNIT_EXPECT_EQ(test, s.n, 4);
+}
+
+/*
+ * Test values computed using a spreadsheet from the pseudocode at the bottom:
+ * https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf
+ */
+
+static void mean_and_variance_weighted_test(struct kunit *test)
+{
+ struct mean_and_variance_weighted s = { .weight = 2 };
+
+ mean_and_variance_weighted_update(&s, 10);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), 10);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 0);
+
+ mean_and_variance_weighted_update(&s, 20);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), 12);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 18);
+
+ mean_and_variance_weighted_update(&s, 30);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), 16);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 72);
+
+ s = (struct mean_and_variance_weighted) { .weight = 2 };
+
+ mean_and_variance_weighted_update(&s, -10);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), -10);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 0);
+
+ mean_and_variance_weighted_update(&s, -20);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), -12);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 18);
+
+ mean_and_variance_weighted_update(&s, -30);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), -16);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 72);
+}
+
+static void mean_and_variance_weighted_advanced_test(struct kunit *test)
+{
+ struct mean_and_variance_weighted s = { .weight = 8 };
+ s64 i;
+
+ for (i = 10; i <= 100; i += 10)
+ mean_and_variance_weighted_update(&s, i);
+
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), 11);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 107);
+
+ s = (struct mean_and_variance_weighted) { .weight = 8 };
+
+ for (i = -10; i >= -100; i -= 10)
+ mean_and_variance_weighted_update(&s, i);
+
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(s), -11);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_variance(s), 107);
+}
+
+static void do_mean_and_variance_test(struct kunit *test,
+ s64 initial_value,
+ s64 initial_n,
+ s64 n,
+ unsigned weight,
+ s64 *data,
+ s64 *mean,
+ s64 *stddev,
+ s64 *weighted_mean,
+ s64 *weighted_stddev)
+{
+ struct mean_and_variance mv = {};
+ struct mean_and_variance_weighted vw = { .weight = weight };
+
+ for (unsigned i = 0; i < initial_n; i++) {
+ mean_and_variance_update(&mv, initial_value);
+ mean_and_variance_weighted_update(&vw, initial_value);
+
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(mv), initial_value);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_stddev(mv), 0);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(vw), initial_value);
+		KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_stddev(vw), 0);
+ }
+
+ for (unsigned i = 0; i < n; i++) {
+ mean_and_variance_update(&mv, data[i]);
+ mean_and_variance_weighted_update(&vw, data[i]);
+
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_mean(mv), mean[i]);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_get_stddev(mv), stddev[i]);
+ KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_mean(vw), weighted_mean[i]);
+		KUNIT_EXPECT_EQ(test, mean_and_variance_weighted_get_stddev(vw), weighted_stddev[i]);
+ }
+
+ KUNIT_EXPECT_EQ(test, mv.n, initial_n + n);
+}
+
+/* Test behaviour with a single outlier, then back to steady state: */
+static void mean_and_variance_test_1(struct kunit *test)
+{
+ s64 d[] = { 100, 10, 10, 10, 10, 10, 10 };
+ s64 mean[] = { 22, 21, 20, 19, 18, 17, 16 };
+ s64 stddev[] = { 32, 29, 28, 27, 26, 25, 24 };
+ s64 weighted_mean[] = { 32, 27, 22, 19, 17, 15, 14 };
+ s64 weighted_stddev[] = { 38, 35, 31, 27, 24, 21, 18 };
+
+ do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
+ d, mean, stddev, weighted_mean, weighted_stddev);
+}
+
+static void mean_and_variance_test_2(struct kunit *test)
+{
+ s64 d[] = { 100, 10, 10, 10, 10, 10, 10 };
+ s64 mean[] = { 10, 10, 10, 10, 10, 10, 10 };
+ s64 stddev[] = { 9, 9, 9, 9, 9, 9, 9 };
+ s64 weighted_mean[] = { 32, 27, 22, 19, 17, 15, 14 };
+ s64 weighted_stddev[] = { 38, 35, 31, 27, 24, 21, 18 };
+
+ do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
+ d, mean, stddev, weighted_mean, weighted_stddev);
+}
+
+/* Test behaviour where we switch from one steady state to another: */
+static void mean_and_variance_test_3(struct kunit *test)
+{
+ s64 d[] = { 100, 100, 100, 100, 100 };
+ s64 mean[] = { 22, 32, 40, 46, 50 };
+ s64 stddev[] = { 32, 39, 42, 44, 45 };
+ s64 weighted_mean[] = { 32, 49, 61, 71, 78 };
+ s64 weighted_stddev[] = { 38, 44, 44, 41, 38 };
+
+ do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
+ d, mean, stddev, weighted_mean, weighted_stddev);
+}
+
+static void mean_and_variance_test_4(struct kunit *test)
+{
+ s64 d[] = { 100, 100, 100, 100, 100 };
+ s64 mean[] = { 10, 11, 12, 13, 14 };
+ s64 stddev[] = { 9, 13, 15, 17, 19 };
+ s64 weighted_mean[] = { 32, 49, 61, 71, 78 };
+ s64 weighted_stddev[] = { 38, 44, 44, 41, 38 };
+
+ do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
+ d, mean, stddev, weighted_mean, weighted_stddev);
+}
+
+static void mean_and_variance_fast_divpow2(struct kunit *test)
+{
+ s64 i;
+ u8 d;
+
+ for (i = 0; i < 100; i++) {
+ d = 0;
+ KUNIT_EXPECT_EQ(test, fast_divpow2(i, d), div_u64(i, 1LLU << d));
+ KUNIT_EXPECT_EQ(test, abs(fast_divpow2(-i, d)), div_u64(i, 1LLU << d));
+ for (d = 1; d < 32; d++) {
+			KUNIT_EXPECT_EQ_MSG(test, abs(fast_divpow2(i, d)),
+					    div_u64(i, 1U << d), "%lld %u", i, d);
+			KUNIT_EXPECT_EQ_MSG(test, abs(fast_divpow2(-i, d)),
+					    div_u64(i, 1U << d), "%lld %u", -i, d);
+ }
+ }
+}
+
+static void mean_and_variance_u128_basic_test(struct kunit *test)
+{
+ u128_u a = u64s_to_u128(0, U64_MAX);
+ u128_u a1 = u64s_to_u128(0, 1);
+ u128_u b = u64s_to_u128(1, 0);
+ u128_u c = u64s_to_u128(0, 1LLU << 63);
+ u128_u c2 = u64s_to_u128(U64_MAX, U64_MAX);
+
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_add(a, a1)), 1);
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_add(a, a1)), 0);
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_add(a1, a)), 1);
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_add(a1, a)), 0);
+
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_sub(b, a1)), U64_MAX);
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_sub(b, a1)), 0);
+
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_shl(c, 1)), 1);
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_shl(c, 1)), 0);
+
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_square(U64_MAX)), U64_MAX - 1);
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_square(U64_MAX)), 1);
+
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_div(b, 2)), 1LLU << 63);
+
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_div(c2, 2)), U64_MAX >> 1);
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_div(c2, 2)), U64_MAX);
+
+ KUNIT_EXPECT_EQ(test, u128_hi(u128_div(u128_shl(u64_to_u128(U64_MAX), 32), 2)), U32_MAX >> 1);
+ KUNIT_EXPECT_EQ(test, u128_lo(u128_div(u128_shl(u64_to_u128(U64_MAX), 32), 2)), U64_MAX << 31);
+}
+
+static struct kunit_case mean_and_variance_test_cases[] = {
+ KUNIT_CASE(mean_and_variance_fast_divpow2),
+ KUNIT_CASE(mean_and_variance_u128_basic_test),
+ KUNIT_CASE(mean_and_variance_basic_test),
+ KUNIT_CASE(mean_and_variance_weighted_test),
+ KUNIT_CASE(mean_and_variance_weighted_advanced_test),
+ KUNIT_CASE(mean_and_variance_test_1),
+ KUNIT_CASE(mean_and_variance_test_2),
+ KUNIT_CASE(mean_and_variance_test_3),
+ KUNIT_CASE(mean_and_variance_test_4),
+ {}
+};
+
+static struct kunit_suite mean_and_variance_test_suite = {
+ .name = "mean and variance tests",
+ .test_cases = mean_and_variance_test_cases
+};
+
+kunit_test_suite(mean_and_variance_test_suite);
+
+MODULE_AUTHOR("Daniel B. Hill");
+MODULE_LICENSE("GPL");
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
new file mode 100644
index 000000000000..e3a51f6d6c9b
--- /dev/null
+++ b/fs/bcachefs/migrate.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for moving data off a device.
+ */
+
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "errcode.h"
+#include "extents.h"
+#include "io_write.h"
+#include "journal.h"
+#include "keylist.h"
+#include "migrate.h"
+#include "move.h"
+#include "replicas.h"
+#include "super-io.h"
+
+static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k,
+ unsigned dev_idx, int flags, bool metadata)
+{
+ unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
+ unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
+ unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
+ unsigned nr_good;
+
+ bch2_bkey_drop_device(k, dev_idx);
+
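+	/*
+	 * Refuse to leave the key with no remaining durability unless the
+	 * caller passed the matching BCH_FORCE_IF_*_LOST flag, and refuse to
+	 * drop below the configured replica count without
+	 * BCH_FORCE_IF_*_DEGRADED:
+	 */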
+ nr_good = bch2_bkey_durability(c, k.s_c);
+ if ((!nr_good && !(flags & lost)) ||
+ (nr_good < replicas && !(flags & degraded)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ unsigned dev_idx,
+ int flags)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i *n;
+ int ret;
+
+ if (!bch2_bkey_has_device_c(k, dev_idx))
+ return 0;
+
+ n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
+
+ ret = drop_dev_ptrs(c, bkey_i_to_s(n), dev_idx, flags, false);
+ if (ret)
+ return ret;
+
+ /*
+ * If the new extent no longer has any pointers, bch2_extent_normalize()
+ * will do the appropriate thing with it (turning it into a
+ * KEY_TYPE_error key, or just a discard if it was a cached extent)
+ */
+ bch2_extent_normalize(c, bkey_i_to_s(n));
+
+ /*
+ * Since we're not inserting through an extent iterator
+ * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+ * we aren't using the extent overwrite path to delete, we're
+ * just using the normal key deletion path:
+ */
+ if (bkey_deleted(&n->k))
+ n->k.size = 0;
+ return 0;
+}
+
+static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ enum btree_id id;
+ int ret = 0;
+
+ for (id = 0; id < BTREE_ID_NR; id++) {
+ if (!btree_type_has_ptrs(id))
+ continue;
+
+ ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
+ if (ret)
+ break;
+ }
+
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct closure cl;
+ struct btree *b;
+ struct bkey_buf k;
+ unsigned id;
+ int ret;
+
+ /* don't handle this yet: */
+ if (flags & BCH_FORCE_IF_METADATA_LOST)
+ return -EINVAL;
+
+ trans = bch2_trans_get(c);
+ bch2_bkey_buf_init(&k);
+ closure_init_stack(&cl);
+
+ for (id = 0; id < BTREE_ID_NR; id++) {
+ bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
+ BTREE_ITER_PREFETCH);
+retry:
+ ret = 0;
+ while (bch2_trans_begin(trans),
+ (b = bch2_btree_iter_peek_node(&iter)) &&
+ !(ret = PTR_ERR_OR_ZERO(b))) {
+ if (!bch2_bkey_has_device_c(bkey_i_to_s_c(&b->key), dev_idx))
+ goto next;
+
+ bch2_bkey_buf_copy(&k, c, &b->key);
+
+ ret = drop_dev_ptrs(c, bkey_i_to_s(k.k),
+ dev_idx, flags, true);
+ if (ret) {
+ bch_err(c, "Cannot drop device without losing data");
+ break;
+ }
+
+ ret = bch2_btree_node_update_key(trans, &iter, b, k.k, 0, false);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ ret = 0;
+ continue;
+ }
+
+ if (ret) {
+ bch_err_msg(c, ret, "updating btree node key");
+ break;
+ }
+next:
+ bch2_btree_iter_next_node(&iter);
+ }
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ goto err;
+ }
+
+ bch2_btree_interior_updates_flush(c);
+ ret = 0;
+err:
+ bch2_bkey_buf_exit(&k, c);
+ bch2_trans_put(trans);
+
+ BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
+
+ return ret;
+}
+
+int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
+{
+ return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
+ bch2_dev_metadata_drop(c, dev_idx, flags);
+}
diff --git a/fs/bcachefs/migrate.h b/fs/bcachefs/migrate.h
new file mode 100644
index 000000000000..027efaa0d575
--- /dev/null
+++ b/fs/bcachefs/migrate.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_MIGRATE_H
+#define _BCACHEFS_MIGRATE_H
+
+int bch2_dev_data_drop(struct bch_fs *, unsigned, int);
+
+#endif /* _BCACHEFS_MIGRATE_H */
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
new file mode 100644
index 000000000000..39a14e321680
--- /dev/null
+++ b/fs/bcachefs/move.c
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "backpointers.h"
+#include "bkey_buf.h"
+#include "btree_gc.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_write_buffer.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "errcode.h"
+#include "error.h"
+#include "inode.h"
+#include "io_read.h"
+#include "io_write.h"
+#include "journal_reclaim.h"
+#include "keylist.h"
+#include "move.h"
+#include "replicas.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/ioprio.h>
+#include <linux/kthread.h>
+
+static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
+static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_read_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_read(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
+static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k)
+{
+ if (trace_move_extent_alloc_mem_fail_enabled()) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, k);
+ trace_move_extent_alloc_mem_fail(c, buf.buf);
+ printbuf_exit(&buf);
+ }
+}
+
+static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
+{
+ mutex_lock(&c->data_progress_lock);
+ list_add(&stats->list, &c->data_progress_list);
+ mutex_unlock(&c->data_progress_lock);
+}
+
+static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
+{
+ mutex_lock(&c->data_progress_lock);
+ list_del(&stats->list);
+ mutex_unlock(&c->data_progress_lock);
+}
+
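+/*
+ * A single extent in flight: the read side fills @rbio, and when the read
+ * completes the buffer is handed to @write (a struct data_update) to be
+ * rewritten; move_free() drops it once the write finishes.
+ */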
+struct moving_io {
+ struct list_head read_list;
+ struct list_head io_list;
+ struct move_bucket_in_flight *b;
+ struct closure cl;
+ bool read_completed;
+
+ unsigned read_sectors;
+ unsigned write_sectors;
+
+ struct bch_read_bio rbio;
+
+ struct data_update write;
+ /* Must be last since it is variable size */
+	struct bio_vec bi_inline_vecs[];
+};
+
+static void move_free(struct moving_io *io)
+{
+ struct moving_context *ctxt = io->write.ctxt;
+
+ if (io->b)
+ atomic_dec(&io->b->count);
+
+ bch2_data_update_exit(&io->write);
+
+ mutex_lock(&ctxt->lock);
+ list_del(&io->io_list);
+ wake_up(&ctxt->wait);
+ mutex_unlock(&ctxt->lock);
+
+ kfree(io);
+}
+
+static void move_write_done(struct bch_write_op *op)
+{
+ struct moving_io *io = container_of(op, struct moving_io, write.op);
+ struct moving_context *ctxt = io->write.ctxt;
+
+ if (io->write.op.error)
+ ctxt->write_error = true;
+
+ atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
+ atomic_dec(&io->write.ctxt->write_ios);
+ move_free(io);
+ closure_put(&ctxt->cl);
+}
+
+static void move_write(struct moving_io *io)
+{
+ if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
+ move_free(io);
+ return;
+ }
+
+ closure_get(&io->write.ctxt->cl);
+ atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
+ atomic_inc(&io->write.ctxt->write_ios);
+
+ bch2_data_update_read_done(&io->write, io->rbio.pick.crc);
+}
+
+struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *ctxt)
+{
+ struct moving_io *io =
+ list_first_entry_or_null(&ctxt->reads, struct moving_io, read_list);
+
+ return io && io->read_completed ? io : NULL;
+}
+
+static void move_read_endio(struct bio *bio)
+{
+ struct moving_io *io = container_of(bio, struct moving_io, rbio.bio);
+ struct moving_context *ctxt = io->write.ctxt;
+
+ atomic_sub(io->read_sectors, &ctxt->read_sectors);
+ atomic_dec(&ctxt->read_ios);
+ io->read_completed = true;
+
+ wake_up(&ctxt->wait);
+ closure_put(&ctxt->cl);
+}
+
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
+ struct btree_trans *trans)
+{
+ struct moving_io *io;
+
+ if (trans)
+ bch2_trans_unlock(trans);
+
+ while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+ list_del(&io->read_list);
+ move_write(io);
+ }
+}
+
+static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
+ struct btree_trans *trans)
+{
+ unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
+
+ move_ctxt_wait_event(ctxt, trans,
+ !atomic_read(&ctxt->write_sectors) ||
+ atomic_read(&ctxt->write_sectors) != sectors_pending);
+}
+
+void bch2_moving_ctxt_exit(struct moving_context *ctxt)
+{
+ struct bch_fs *c = ctxt->c;
+
+ move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+ closure_sync(&ctxt->cl);
+
+ EBUG_ON(atomic_read(&ctxt->write_sectors));
+ EBUG_ON(atomic_read(&ctxt->write_ios));
+ EBUG_ON(atomic_read(&ctxt->read_sectors));
+ EBUG_ON(atomic_read(&ctxt->read_ios));
+
+ if (ctxt->stats) {
+ progress_list_del(c, ctxt->stats);
+ trace_move_data(c,
+ atomic64_read(&ctxt->stats->sectors_moved),
+ atomic64_read(&ctxt->stats->keys_moved));
+ }
+
+ mutex_lock(&c->moving_context_lock);
+ list_del(&ctxt->list);
+ mutex_unlock(&c->moving_context_lock);
+}
+
+void bch2_moving_ctxt_init(struct moving_context *ctxt,
+ struct bch_fs *c,
+ struct bch_ratelimit *rate,
+ struct bch_move_stats *stats,
+ struct write_point_specifier wp,
+ bool wait_on_copygc)
+{
+ memset(ctxt, 0, sizeof(*ctxt));
+
+ ctxt->c = c;
+ ctxt->fn = (void *) _RET_IP_;
+ ctxt->rate = rate;
+ ctxt->stats = stats;
+ ctxt->wp = wp;
+ ctxt->wait_on_copygc = wait_on_copygc;
+
+ closure_init_stack(&ctxt->cl);
+
+ mutex_init(&ctxt->lock);
+ INIT_LIST_HEAD(&ctxt->reads);
+ INIT_LIST_HEAD(&ctxt->ios);
+ init_waitqueue_head(&ctxt->wait);
+
+ mutex_lock(&c->moving_context_lock);
+ list_add(&ctxt->list, &c->moving_context_list);
+ mutex_unlock(&c->moving_context_lock);
+
+ if (stats) {
+ progress_list_add(c, stats);
+ stats->data_type = BCH_DATA_user;
+ }
+}
+
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
+{
+ memset(stats, 0, sizeof(*stats));
+ scnprintf(stats->name, sizeof(stats->name), "%s", name);
+}
+
+static int bch2_extent_drop_ptrs(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct data_update_opts data_opts)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i *n;
+ int ret;
+
+ n = bch2_bkey_make_mut_noupdate(trans, k);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
+
+ while (data_opts.kill_ptrs) {
+ unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
+ struct bch_extent_ptr *ptr;
+
+ bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
+ data_opts.kill_ptrs ^= 1U << drop;
+ }
+
+ /*
+ * If the new extent no longer has any pointers, bch2_extent_normalize()
+ * will do the appropriate thing with it (turning it into a
+ * KEY_TYPE_error key, or just a discard if it was a cached extent)
+ */
+ bch2_extent_normalize(c, bkey_i_to_s(n));
+
+ /*
+ * Since we're not inserting through an extent iterator
+ * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+ * we aren't using the extent overwrite path to delete, we're
+ * just using the normal key deletion path:
+ */
+ if (bkey_deleted(&n->k))
+ n->k.size = 0;
+
+ return bch2_trans_relock(trans) ?:
+ bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+}
+
+static int bch2_move_extent(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
+ struct bch_io_opts io_opts,
+ enum btree_id btree_id,
+ struct bkey_s_c k,
+ struct data_update_opts data_opts)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct moving_io *io;
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ unsigned sectors = k.k->size, pages;
+ int ret = -ENOMEM;
+
+ trace_move_extent2(c, k);
+
+ bch2_data_update_opts_normalize(k, &data_opts);
+
+ if (!data_opts.rewrite_ptrs &&
+ !data_opts.extra_replicas) {
+ if (data_opts.kill_ptrs)
+ return bch2_extent_drop_ptrs(trans, iter, k, data_opts);
+ return 0;
+ }
+
+ /*
+ * Before memory allocations & taking nocow locks in
+ * bch2_data_update_init():
+ */
+ bch2_trans_unlock(trans);
+
+ /* write path might have to decompress data: */
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+ sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
+
+ pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
+ io = kzalloc(sizeof(struct moving_io) +
+ sizeof(struct bio_vec) * pages, GFP_KERNEL);
+ if (!io)
+ goto err;
+
+ INIT_LIST_HEAD(&io->io_list);
+ io->write.ctxt = ctxt;
+ io->read_sectors = k.k->size;
+ io->write_sectors = k.k->size;
+
+ bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
+ bio_set_prio(&io->write.op.wbio.bio,
+ IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+
+ if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9,
+ GFP_KERNEL))
+ goto err_free;
+
+ io->rbio.c = c;
+ io->rbio.opts = io_opts;
+ bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
+ io->rbio.bio.bi_vcnt = pages;
+ bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
+ io->rbio.bio.bi_iter.bi_size = sectors << 9;
+
+ io->rbio.bio.bi_opf = REQ_OP_READ;
+ io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
+ io->rbio.bio.bi_end_io = move_read_endio;
+
+ ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
+ io_opts, data_opts, btree_id, k);
+ if (ret && ret != -BCH_ERR_unwritten_extent_update)
+ goto err_free_pages;
+
+ if (ret == -BCH_ERR_unwritten_extent_update) {
+ bch2_update_unwritten_extent(trans, &io->write);
+ move_free(io);
+ return 0;
+ }
+
+ BUG_ON(ret);
+
+ io->write.ctxt = ctxt;
+ io->write.op.end_io = move_write_done;
+
+ if (ctxt->stats) {
+ atomic64_inc(&ctxt->stats->keys_moved);
+ atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+ }
+
+ if (bucket_in_flight) {
+ io->b = bucket_in_flight;
+ atomic_inc(&io->b->count);
+ }
+
+ this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
+ this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
+ trace_move_extent_read2(c, k);
+
+ mutex_lock(&ctxt->lock);
+ atomic_add(io->read_sectors, &ctxt->read_sectors);
+ atomic_inc(&ctxt->read_ios);
+
+ list_add_tail(&io->read_list, &ctxt->reads);
+ list_add_tail(&io->io_list, &ctxt->ios);
+ mutex_unlock(&ctxt->lock);
+
+ /*
+ * dropped by move_read_endio() - guards against use after free of
+ * ctxt when doing wakeup
+ */
+ closure_get(&ctxt->cl);
+ bch2_read_extent(trans, &io->rbio,
+ bkey_start_pos(k.k),
+ btree_id, k, 0,
+ BCH_READ_NODECODE|
+ BCH_READ_LAST_FRAGMENT);
+ return 0;
+err_free_pages:
+ bio_free_pages(&io->write.op.wbio.bio);
+err_free:
+ kfree(io);
+err:
+ this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]);
+ trace_move_extent_alloc_mem_fail2(c, k);
+ return ret;
+}
+
+static int lookup_inode(struct btree_trans *trans, struct bpos pos,
+ struct bch_inode_unpacked *inode)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
+ BTREE_ITER_ALL_SNAPSHOTS);
+ k = bch2_btree_iter_peek(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k || !bkey_eq(k.k->p, pos)) {
+ ret = -BCH_ERR_ENOENT_inode;
+ goto err;
+ }
+
+ ret = bkey_is_inode(k.k) ? 0 : -EIO;
+ if (ret)
+ goto err;
+
+ ret = bch2_inode_unpack(k, inode);
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int move_ratelimit(struct btree_trans *trans,
+ struct moving_context *ctxt)
+{
+ struct bch_fs *c = trans->c;
+ u64 delay;
+
+ if (ctxt->wait_on_copygc) {
+ bch2_trans_unlock(trans);
+ wait_event_killable(c->copygc_running_wq,
+ !c->copygc_running ||
+ kthread_should_stop());
+ }
+
+ do {
+ delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
+
+ if (delay) {
+ bch2_trans_unlock(trans);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+
+ if ((current->flags & PF_KTHREAD) && kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ return 1;
+ }
+
+ if (delay)
+ schedule_timeout(delay);
+
+ if (unlikely(freezing(current))) {
+ move_ctxt_wait_event(ctxt, trans, list_empty(&ctxt->reads));
+ try_to_freeze();
+ }
+ } while (delay);
+
+ /*
+ * XXX: these limits really ought to be per device, SSDs and hard drives
+ * will want different limits
+ */
+ move_ctxt_wait_event(ctxt, trans,
+ atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
+ atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
+ atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
+ atomic_read(&ctxt->read_ios) < c->opts.move_ios_in_flight);
+
+ return 0;
+}
+
+static int move_get_io_opts(struct btree_trans *trans,
+ struct bch_io_opts *io_opts,
+ struct bkey_s_c k, u64 *cur_inum)
+{
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ if (*cur_inum == k.k->p.inode)
+ return 0;
+
+ ret = lookup_inode(trans,
+ SPOS(0, k.k->p.inode, k.k->p.snapshot),
+ &inode);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ return ret;
+
+ if (!ret)
+ bch2_inode_opts_get(io_opts, trans->c, &inode);
+ else
+ *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
+ *cur_inum = k.k->p.inode;
+ return 0;
+}
+
+static int __bch2_move_data(struct moving_context *ctxt,
+ struct bpos start,
+ struct bpos end,
+ move_pred_fn pred, void *arg,
+ enum btree_id btree_id)
+{
+ struct bch_fs *c = ctxt->c;
+ struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+ struct bkey_buf sk;
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct data_update_opts data_opts;
+ u64 cur_inum = U64_MAX;
+ int ret = 0, ret2;
+
+ bch2_bkey_buf_init(&sk);
+
+ if (ctxt->stats) {
+ ctxt->stats->data_type = BCH_DATA_user;
+ ctxt->stats->btree_id = btree_id;
+ ctxt->stats->pos = start;
+ }
+
+ bch2_trans_iter_init(trans, &iter, btree_id, start,
+ BTREE_ITER_PREFETCH|
+ BTREE_ITER_ALL_SNAPSHOTS);
+
+ if (ctxt->rate)
+ bch2_ratelimit_reset(ctxt->rate);
+
+ while (!move_ratelimit(trans, ctxt)) {
+ bch2_trans_begin(trans);
+
+ k = bch2_btree_iter_peek(&iter);
+ if (!k.k)
+ break;
+
+ ret = bkey_err(k);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+
+ if (bkey_ge(bkey_start_pos(k.k), end))
+ break;
+
+ if (ctxt->stats)
+ ctxt->stats->pos = iter.pos;
+
+ if (!bkey_extent_is_direct_data(k.k))
+ goto next_nondata;
+
+ ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+ if (ret)
+ continue;
+
+ memset(&data_opts, 0, sizeof(data_opts));
+ if (!pred(c, arg, k, &io_opts, &data_opts))
+ goto next;
+
+ /*
+ * The iterator gets unlocked by __bch2_read_extent - need to
+ * save a copy of @k elsewhere:
+ */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
+
+ ret2 = bch2_move_extent(trans, &iter, ctxt, NULL,
+ io_opts, btree_id, k, data_opts);
+ if (ret2) {
+ if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
+ continue;
+
+ if (ret2 == -ENOMEM) {
+ /* memory allocation failure, wait for some IO to finish */
+ bch2_move_ctxt_wait_for_io(ctxt, trans);
+ continue;
+ }
+
+ /* XXX signal failure */
+ goto next;
+ }
+
+ if (ctxt->rate)
+ bch2_ratelimit_increment(ctxt->rate, k.k->size);
+next:
+ if (ctxt->stats)
+ atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+next_nondata:
+ bch2_btree_iter_advance(&iter);
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&sk, c);
+
+ return ret;
+}
+
+int bch2_move_data(struct bch_fs *c,
+ enum btree_id start_btree_id, struct bpos start_pos,
+ enum btree_id end_btree_id, struct bpos end_pos,
+ struct bch_ratelimit *rate,
+ struct bch_move_stats *stats,
+ struct write_point_specifier wp,
+ bool wait_on_copygc,
+ move_pred_fn pred, void *arg)
+{
+ struct moving_context ctxt;
+ enum btree_id id;
+ int ret = 0;
+
+ bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+
+ for (id = start_btree_id;
+ id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
+ id++) {
+ stats->btree_id = id;
+
+ if (id != BTREE_ID_extents &&
+ id != BTREE_ID_reflink)
+ continue;
+
+ if (!bch2_btree_id_root(c, id)->b)
+ continue;
+
+ ret = __bch2_move_data(&ctxt,
+ id == start_btree_id ? start_pos : POS_MIN,
+ id == end_btree_id ? end_pos : POS_MAX,
+ pred, arg, id);
+ if (ret)
+ break;
+ }
+
+ bch2_moving_ctxt_exit(&ctxt);
+
+ return ret;
+}
+
+int __bch2_evacuate_bucket(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct move_bucket_in_flight *bucket_in_flight,
+ struct bpos bucket, int gen,
+ struct data_update_opts _data_opts)
+{
+ struct bch_fs *c = ctxt->c;
+ struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+ struct btree_iter iter;
+ struct bkey_buf sk;
+ struct bch_backpointer bp;
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+ struct bkey_s_c k;
+ struct data_update_opts data_opts;
+ unsigned dirty_sectors, bucket_size;
+ u64 fragmentation;
+ u64 cur_inum = U64_MAX;
+ struct bpos bp_pos = POS_MIN;
+ int ret = 0;
+
+ trace_bucket_evacuate(c, &bucket);
+
+ bch2_bkey_buf_init(&sk);
+
+ /*
+ * We're not run in a context that handles transaction restarts:
+ */
+ bch2_trans_begin(trans);
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+ bucket, BTREE_ITER_CACHED);
+ ret = lockrestart_do(trans,
+ bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret) {
+ bch_err_msg(c, ret, "looking up alloc key");
+ goto err;
+ }
+
+ a = bch2_alloc_to_v4(k, &a_convert);
+ dirty_sectors = a->dirty_sectors;
+ bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+ fragmentation = a->fragmentation_lru;
+
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (ret) {
+ bch_err_msg(c, ret, "flushing btree write buffer");
+ goto err;
+ }
+
+ while (!(ret = move_ratelimit(trans, ctxt))) {
+ bch2_trans_begin(trans);
+
+ ret = bch2_get_next_backpointer(trans, bucket, gen,
+ &bp_pos, &bp,
+ BTREE_ITER_CACHED);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ goto err;
+ if (bkey_eq(bp_pos, POS_MAX))
+ break;
+
+ if (!bp.level) {
+ const struct bch_extent_ptr *ptr;
+ unsigned i = 0;
+
+ k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
+ ret = bkey_err(k);
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ goto err;
+ if (!k.k)
+ goto next;
+
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
+
+ ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+ if (ret) {
+ bch2_trans_iter_exit(trans, &iter);
+ continue;
+ }
+
+ data_opts = _data_opts;
+ data_opts.target = io_opts.background_target;
+ data_opts.rewrite_ptrs = 0;
+
+ bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
+ if (ptr->dev == bucket.inode) {
+ data_opts.rewrite_ptrs |= 1U << i;
+ if (ptr->cached) {
+ bch2_trans_iter_exit(trans, &iter);
+ goto next;
+ }
+ }
+ i++;
+ }
+
+ ret = bch2_move_extent(trans, &iter, ctxt,
+ bucket_in_flight,
+ io_opts, bp.btree_id, k, data_opts);
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret == -ENOMEM) {
+ /* memory allocation failure, wait for some IO to finish */
+ bch2_move_ctxt_wait_for_io(ctxt, trans);
+ continue;
+ }
+ if (ret)
+ goto err;
+
+ if (ctxt->rate)
+ bch2_ratelimit_increment(ctxt->rate, k.k->size);
+ if (ctxt->stats)
+ atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+ } else {
+ struct btree *b;
+
+ b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp);
+ ret = PTR_ERR_OR_ZERO(b);
+ if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
+ continue;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ goto err;
+ if (!b)
+ goto next;
+
+ ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ goto err;
+
+ if (ctxt->rate)
+ bch2_ratelimit_increment(ctxt->rate,
+ c->opts.btree_node_size >> 9);
+ if (ctxt->stats) {
+ atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+ atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+ }
+ }
+next:
+ bp_pos = bpos_nosnap_successor(bp_pos);
+ }
+
+ trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
+err:
+ bch2_bkey_buf_exit(&sk, c);
+ return ret;
+}
+
+int bch2_evacuate_bucket(struct bch_fs *c,
+ struct bpos bucket, int gen,
+ struct data_update_opts data_opts,
+ struct bch_ratelimit *rate,
+ struct bch_move_stats *stats,
+ struct write_point_specifier wp,
+ bool wait_on_copygc)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct moving_context ctxt;
+ int ret;
+
+ bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+ ret = __bch2_evacuate_bucket(trans, &ctxt, NULL, bucket, gen, data_opts);
+ bch2_moving_ctxt_exit(&ctxt);
+ bch2_trans_put(trans);
+
+ return ret;
+}
+
+typedef bool (*move_btree_pred)(struct bch_fs *, void *,
+ struct btree *, struct bch_io_opts *,
+ struct data_update_opts *);
+
+static int bch2_move_btree(struct bch_fs *c,
+ enum btree_id start_btree_id, struct bpos start_pos,
+ enum btree_id end_btree_id, struct bpos end_pos,
+ move_btree_pred pred, void *arg,
+ struct bch_move_stats *stats)
+{
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
+ struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct btree *b;
+ enum btree_id id;
+ struct data_update_opts data_opts;
+ int ret = 0;
+
+ progress_list_add(c, stats);
+
+ stats->data_type = BCH_DATA_btree;
+
+ for (id = start_btree_id;
+ id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
+ id++) {
+ stats->btree_id = id;
+
+ if (!bch2_btree_id_root(c, id)->b)
+ continue;
+
+ bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
+ BTREE_ITER_PREFETCH);
+retry:
+ ret = 0;
+ while (bch2_trans_begin(trans),
+ (b = bch2_btree_iter_peek_node(&iter)) &&
+ !(ret = PTR_ERR_OR_ZERO(b))) {
+ if (kthread && kthread_should_stop())
+ break;
+
+ if ((cmp_int(id, end_btree_id) ?:
+ bpos_cmp(b->key.k.p, end_pos)) > 0)
+ break;
+
+ stats->pos = iter.pos;
+
+ if (!pred(c, arg, b, &io_opts, &data_opts))
+ goto next;
+
+ ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: ret;
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ continue;
+ if (ret)
+ break;
+next:
+ bch2_btree_iter_next_node(&iter);
+ }
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (kthread && kthread_should_stop())
+ break;
+ }
+
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
+
+ bch2_btree_interior_updates_flush(c);
+
+ progress_list_del(c, stats);
+ return ret;
+}
+
+static bool rereplicate_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ unsigned nr_good = bch2_bkey_durability(c, k);
+ unsigned replicas = bkey_is_btree_ptr(k.k)
+ ? c->opts.metadata_replicas
+ : io_opts->data_replicas;
+
+ if (!nr_good || nr_good >= replicas)
+ return false;
+
+ data_opts->target = 0;
+ data_opts->extra_replicas = replicas - nr_good;
+ data_opts->btree_insert_flags = 0;
+ return true;
+}
+
+static bool migrate_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+ struct bch_ioctl_data *op = arg;
+ unsigned i = 0;
+
+ data_opts->rewrite_ptrs = 0;
+ data_opts->target = 0;
+ data_opts->extra_replicas = 0;
+ data_opts->btree_insert_flags = 0;
+
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (ptr->dev == op->migrate.dev)
+ data_opts->rewrite_ptrs |= 1U << i;
+ i++;
+ }
+
+ return data_opts->rewrite_ptrs != 0;
+}
+
+static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
+ struct btree *b,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ return rereplicate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
+}
+
+static bool migrate_btree_pred(struct bch_fs *c, void *arg,
+ struct btree *b,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
+}
+
+static bool bformat_needs_redo(struct bkey_format *f)
+{
+ unsigned i;
+
+ for (i = 0; i < f->nr_fields; i++) {
+ unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+ u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
+ u64 field_offset = le64_to_cpu(f->field_offset[i]);
+
+ if (f->bits_per_field[i] > unpacked_bits)
+ return true;
+
+ if ((f->bits_per_field[i] == unpacked_bits) && field_offset)
+ return true;
+
+ if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) &
+ unpacked_mask) <
+ field_offset)
+ return true;
+ }
+
+ return false;
+}
+
+static bool rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
+ struct btree *b,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ if (b->version_ondisk != c->sb.version ||
+ btree_node_need_rewrite(b) ||
+ bformat_needs_redo(&b->format)) {
+ data_opts->target = 0;
+ data_opts->extra_replicas = 0;
+ data_opts->btree_insert_flags = 0;
+ return true;
+ }
+
+ return false;
+}
+
+int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
+{
+ int ret;
+
+ ret = bch2_move_btree(c,
+ 0, POS_MIN,
+ BTREE_ID_NR, SPOS_MAX,
+ rewrite_old_nodes_pred, c, stats);
+ if (!ret) {
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
+ c->disk_sb.sb->version_min = c->disk_sb.sb->version;
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+int bch2_data_job(struct bch_fs *c,
+ struct bch_move_stats *stats,
+ struct bch_ioctl_data op)
+{
+ int ret = 0;
+
+ switch (op.op) {
+ case BCH_DATA_OP_REREPLICATE:
+ bch2_move_stats_init(stats, "rereplicate");
+ stats->data_type = BCH_DATA_journal;
+ ret = bch2_journal_flush_device_pins(&c->journal, -1);
+
+ ret = bch2_move_btree(c,
+ op.start_btree, op.start_pos,
+ op.end_btree, op.end_pos,
+ rereplicate_btree_pred, c, stats) ?: ret;
+ ret = bch2_replicas_gc2(c) ?: ret;
+
+ ret = bch2_move_data(c,
+ op.start_btree, op.start_pos,
+ op.end_btree, op.end_pos,
+ NULL,
+ stats,
+ writepoint_hashed((unsigned long) current),
+ true,
+ rereplicate_pred, c) ?: ret;
+ ret = bch2_replicas_gc2(c) ?: ret;
+ break;
+ case BCH_DATA_OP_MIGRATE:
+ if (op.migrate.dev >= c->sb.nr_devices)
+ return -EINVAL;
+
+ bch2_move_stats_init(stats, "migrate");
+ stats->data_type = BCH_DATA_journal;
+ ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
+
+ ret = bch2_move_btree(c,
+ op.start_btree, op.start_pos,
+ op.end_btree, op.end_pos,
+ migrate_btree_pred, &op, stats) ?: ret;
+ ret = bch2_replicas_gc2(c) ?: ret;
+
+ ret = bch2_move_data(c,
+ op.start_btree, op.start_pos,
+ op.end_btree, op.end_pos,
+ NULL,
+ stats,
+ writepoint_hashed((unsigned long) current),
+ true,
+ migrate_pred, &op) ?: ret;
+ ret = bch2_replicas_gc2(c) ?: ret;
+ break;
+ case BCH_DATA_OP_REWRITE_OLD_NODES:
+ bch2_move_stats_init(stats, "rewrite_old_nodes");
+ ret = bch2_scan_old_btree_nodes(c, stats);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+{
+ struct bch_move_stats *stats = ctxt->stats;
+ struct moving_io *io;
+
+ prt_printf(out, "%s (%ps):", stats->name, ctxt->fn);
+ prt_newline(out);
+
+ prt_printf(out, " data type %s btree_id %s position: ",
+ bch2_data_types[stats->data_type],
+ bch2_btree_ids[stats->btree_id]);
+ bch2_bpos_to_text(out, stats->pos);
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "reads: ios %u/%u sectors %u/%u",
+ atomic_read(&ctxt->read_ios),
+ c->opts.move_ios_in_flight,
+ atomic_read(&ctxt->read_sectors),
+ c->opts.move_bytes_in_flight >> 9);
+ prt_newline(out);
+
+ prt_printf(out, "writes: ios %u/%u sectors %u/%u",
+ atomic_read(&ctxt->write_ios),
+ c->opts.move_ios_in_flight,
+ atomic_read(&ctxt->write_sectors),
+ c->opts.move_bytes_in_flight >> 9);
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+
+ mutex_lock(&ctxt->lock);
+ list_for_each_entry(io, &ctxt->ios, io_list)
+ bch2_write_op_to_text(out, &io->write.op);
+ mutex_unlock(&ctxt->lock);
+
+ printbuf_indent_sub(out, 4);
+}
+
+void bch2_fs_moving_ctxts_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct moving_context *ctxt;
+
+ mutex_lock(&c->moving_context_lock);
+ list_for_each_entry(ctxt, &c->moving_context_list, list)
+ bch2_moving_ctxt_to_text(out, c, ctxt);
+ mutex_unlock(&c->moving_context_lock);
+}
+
+void bch2_fs_move_init(struct bch_fs *c)
+{
+ INIT_LIST_HEAD(&c->moving_context_list);
+ mutex_init(&c->moving_context_lock);
+
+ INIT_LIST_HEAD(&c->data_progress_list);
+ mutex_init(&c->data_progress_lock);
+}
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
new file mode 100644
index 000000000000..cbdd58db8782
--- /dev/null
+++ b/fs/bcachefs/move.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_MOVE_H
+#define _BCACHEFS_MOVE_H
+
+#include "bcachefs_ioctl.h"
+#include "btree_iter.h"
+#include "buckets.h"
+#include "data_update.h"
+#include "move_types.h"
+
+struct bch_read_bio;
+
+struct moving_context {
+ struct bch_fs *c;
+ struct list_head list;
+ void *fn;
+
+ struct bch_ratelimit *rate;
+ struct bch_move_stats *stats;
+ struct write_point_specifier wp;
+ bool wait_on_copygc;
+ bool write_error;
+
+ /* For waiting on outstanding reads and writes: */
+ struct closure cl;
+
+ struct mutex lock;
+ struct list_head reads;
+ struct list_head ios;
+
+ /* in flight sectors: */
+ atomic_t read_sectors;
+ atomic_t write_sectors;
+ atomic_t read_ios;
+ atomic_t write_ios;
+
+ wait_queue_head_t wait;
+};
+
+#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
+do { \
+ bool cond_finished = false; \
+ bch2_moving_ctxt_do_pending_writes(_ctxt, _trans); \
+ \
+ if (_cond) \
+ break; \
+ __wait_event((_ctxt)->wait, \
+ bch2_moving_ctxt_next_pending_write(_ctxt) || \
+ (cond_finished = (_cond))); \
+ if (cond_finished) \
+ break; \
+} while (1)
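+
+/*
+ * Typical use - wait for all IO against one in-flight bucket to drain,
+ * completing finished writes as they come in (see move_buckets_wait() in
+ * movinggc.c):
+ *
+ *	move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
+ */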
+
+typedef bool (*move_pred_fn)(struct bch_fs *, void *, struct bkey_s_c,
+ struct bch_io_opts *, struct data_update_opts *);
+
+void bch2_moving_ctxt_exit(struct moving_context *);
+void bch2_moving_ctxt_init(struct moving_context *, struct bch_fs *,
+ struct bch_ratelimit *, struct bch_move_stats *,
+ struct write_point_specifier, bool);
+struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *);
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *,
+ struct btree_trans *);
+
+int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *);
+
+int bch2_move_data(struct bch_fs *,
+ enum btree_id, struct bpos,
+ enum btree_id, struct bpos,
+ struct bch_ratelimit *,
+ struct bch_move_stats *,
+ struct write_point_specifier,
+ bool,
+ move_pred_fn, void *);
+
+int __bch2_evacuate_bucket(struct btree_trans *,
+ struct moving_context *,
+ struct move_bucket_in_flight *,
+ struct bpos, int,
+ struct data_update_opts);
+int bch2_evacuate_bucket(struct bch_fs *, struct bpos, int,
+ struct data_update_opts,
+ struct bch_ratelimit *,
+ struct bch_move_stats *,
+ struct write_point_specifier,
+ bool);
+int bch2_data_job(struct bch_fs *,
+ struct bch_move_stats *,
+ struct bch_ioctl_data);
+
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name);
+void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *);
+
+void bch2_fs_move_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_MOVE_H */
diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h
new file mode 100644
index 000000000000..baf1f8570b3f
--- /dev/null
+++ b/fs/bcachefs/move_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_MOVE_TYPES_H
+#define _BCACHEFS_MOVE_TYPES_H
+
+struct bch_move_stats {
+ enum bch_data_type data_type;
+ enum btree_id btree_id;
+ struct bpos pos;
+ struct list_head list;
+ char name[32];
+
+ atomic64_t keys_moved;
+ atomic64_t keys_raced;
+ atomic64_t sectors_moved;
+ atomic64_t sectors_seen;
+ atomic64_t sectors_raced;
+};
+
+struct move_bucket_key {
+ struct bpos bucket;
+ u8 gen;
+};
+
+struct move_bucket {
+ struct move_bucket_key k;
+ unsigned sectors;
+};
+
+struct move_bucket_in_flight {
+ struct move_bucket_in_flight *next;
+ struct rhash_head hash;
+ struct move_bucket bucket;
+ atomic_t count;
+};
+
+#endif /* _BCACHEFS_MOVE_TYPES_H */
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
new file mode 100644
index 000000000000..4017120baeee
--- /dev/null
+++ b/fs/bcachefs/movinggc.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Moving/copying garbage collector
+ *
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
+#include "buckets.h"
+#include "clock.h"
+#include "errcode.h"
+#include "error.h"
+#include "lru.h"
+#include "move.h"
+#include "movinggc.h"
+#include "trace.h"
+
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/math64.h>
+#include <linux/sched/task.h>
+#include <linux/wait.h>
+
+struct buckets_in_flight {
+ struct rhashtable table;
+ struct move_bucket_in_flight *first;
+ struct move_bucket_in_flight *last;
+ size_t nr;
+ size_t sectors;
+};
+
+static const struct rhashtable_params bch_move_bucket_params = {
+ .head_offset = offsetof(struct move_bucket_in_flight, hash),
+ .key_offset = offsetof(struct move_bucket_in_flight, bucket.k),
+ .key_len = sizeof(struct move_bucket_key),
+};
+
+static struct move_bucket_in_flight *
+move_bucket_in_flight_add(struct buckets_in_flight *list, struct move_bucket b)
+{
+ struct move_bucket_in_flight *new = kzalloc(sizeof(*new), GFP_KERNEL);
+ int ret;
+
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ new->bucket = b;
+
+ ret = rhashtable_lookup_insert_fast(&list->table, &new->hash,
+ bch_move_bucket_params);
+ if (ret) {
+ kfree(new);
+ return ERR_PTR(ret);
+ }
+
+ if (!list->first)
+ list->first = new;
+ else
+ list->last->next = new;
+
+ list->last = new;
+ list->nr++;
+ list->sectors += b.sectors;
+ return new;
+}
+
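+/*
+ * Re-check a bucket picked out of the fragmentation LRU: the LRU entry may
+ * be stale, so the bucket must not be open for writes, must still hold
+ * movable data, and its current fragmentation LRU time must not be newer
+ * than the entry that led us here.
+ */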
+static int bch2_bucket_is_movable(struct btree_trans *trans,
+ struct move_bucket *b, u64 time)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_alloc_v4 _a;
+ const struct bch_alloc_v4 *a;
+ int ret;
+
+ if (bch2_bucket_is_open(trans->c,
+ b->k.bucket.inode,
+ b->k.bucket.offset))
+ return 0;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+ b->k.bucket, BTREE_ITER_CACHED);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ a = bch2_alloc_to_v4(k, &_a);
+ b->k.gen = a->gen;
+ b->sectors = a->dirty_sectors;
+
+ ret = data_type_movable(a->data_type) &&
+ a->fragmentation_lru &&
+ a->fragmentation_lru <= time;
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static void move_buckets_wait(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct buckets_in_flight *list,
+ bool flush)
+{
+ struct move_bucket_in_flight *i;
+ int ret;
+
+ while ((i = list->first)) {
+ if (flush)
+ move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
+
+ if (atomic_read(&i->count))
+ break;
+
+ list->first = i->next;
+ if (!list->first)
+ list->last = NULL;
+
+ list->nr--;
+ list->sectors -= i->bucket.sectors;
+
+ ret = rhashtable_remove_fast(&list->table, &i->hash,
+ bch_move_bucket_params);
+ BUG_ON(ret);
+ kfree(i);
+ }
+
+ bch2_trans_unlock(trans);
+}
+
+static bool bucket_in_flight(struct buckets_in_flight *list,
+ struct move_bucket_key k)
+{
+ return rhashtable_lookup_fast(&list->table, &k, bch_move_bucket_params);
+}
+
+typedef DARRAY(struct move_bucket) move_buckets;
+
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct buckets_in_flight *buckets_in_flight,
+ move_buckets *buckets)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
+ size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
+ int ret;
+
+ move_buckets_wait(trans, ctxt, buckets_in_flight, false);
+
+ ret = bch2_btree_write_buffer_flush(trans);
+ if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+ __func__, bch2_err_str(ret)))
+ return ret;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+ lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+ lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+ 0, k, ({
+ struct move_bucket b = { .k.bucket = u64_to_bucket(k.k->p.offset) };
+ int ret2 = 0;
+
+ saw++;
+
+ if (!bch2_bucket_is_movable(trans, &b, lru_pos_time(k.k->p)))
+ not_movable++;
+ else if (bucket_in_flight(buckets_in_flight, b.k))
+ in_flight++;
+ else {
+ ret2 = darray_push(buckets, b) ?: buckets->nr >= nr_to_get;
+ if (ret2 >= 0)
+ sectors += b.sectors;
+ }
+ ret2;
+ }));
+
+ pr_debug("have: %zu (%zu) saw %zu in flight %zu not movable %zu got %zu (%zu)/%zu buckets ret %i",
+ buckets_in_flight->nr, buckets_in_flight->sectors,
+ saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);
+
+ return ret < 0 ? ret : 0;
+}
+
+noinline
+static int bch2_copygc(struct btree_trans *trans,
+ struct moving_context *ctxt,
+ struct buckets_in_flight *buckets_in_flight)
+{
+ struct bch_fs *c = trans->c;
+ struct data_update_opts data_opts = {
+ .btree_insert_flags = BCH_WATERMARK_copygc,
+ };
+ move_buckets buckets = { 0 };
+ struct move_bucket_in_flight *f;
+ struct move_bucket *i;
+ u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
+ int ret = 0;
+
+ ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
+ if (ret)
+ goto err;
+
+ darray_for_each(buckets, i) {
+ if (unlikely(freezing(current)))
+ break;
+
+ f = move_bucket_in_flight_add(buckets_in_flight, *i);
+ ret = PTR_ERR_OR_ZERO(f);
+ if (ret == -EEXIST) { /* rare race: copygc_get_buckets returned same bucket more than once */
+ ret = 0;
+ continue;
+ }
+ if (ret == -ENOMEM) { /* flush IO, continue later */
+ ret = 0;
+ break;
+ }
+
+ ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
+ f->bucket.k.gen, data_opts);
+ if (ret)
+ goto err;
+ }
+err:
+ darray_exit(&buckets);
+
+ /* no entries in LRU btree found, or got to end: */
+ if (bch2_err_matches(ret, ENOENT))
+ ret = 0;
+
+ if (ret < 0 && !bch2_err_matches(ret, EROFS))
+ bch_err_msg(c, ret, "from bch2_move_data()");
+
+ moved = atomic64_read(&ctxt->stats->sectors_moved) - moved;
+ trace_and_count(c, copygc, c, moved, 0, 0, 0);
+ return ret;
+}
+
+/*
+ * Copygc runs when the amount of fragmented data is above some arbitrary
+ * threshold:
+ *
+ * The threshold at the limit - when the device is full - is the amount of space
+ * we reserved in bch2_recalc_capacity; we can't have more than that amount of
+ * disk space stranded due to fragmentation and store everything we have
+ * promised to store.
+ *
+ * But we don't want to be running copygc unnecessarily when the device still
+ * has plenty of free space - rather, we want copygc to smoothly run every so
+ * often and continually reduce the amount of fragmented space as the device
+ * fills up. So, we increase the threshold by half the current free space.
+ */
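+/*
+ * Concretely, the wait is computed per device as
+ *	max(0, available_space / 2 - currently_fragmented)
+ * (in sectors), taking the minimum across rw members - e.g. with 100G
+ * available and 10G already fragmented, roughly another 40G of writes can
+ * happen before copygc wakes up again.
+ */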
+unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned dev_idx;
+ s64 wait = S64_MAX, fragmented_allowed, fragmented;
+ unsigned i;
+
+ for_each_rw_member(ca, c, dev_idx) {
+ struct bch_dev_usage usage = bch2_dev_usage_read(ca);
+
+ fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
+ ca->mi.bucket_size) >> 1);
+ fragmented = 0;
+
+ for (i = 0; i < BCH_DATA_NR; i++)
+ if (data_type_movable(i))
+ fragmented += usage.d[i].fragmented;
+
+ wait = min(wait, max(0LL, fragmented_allowed - fragmented));
+ }
+
+ return wait;
+}
+
+void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ prt_printf(out, "Currently waiting for: ");
+ prt_human_readable_u64(out, max(0LL, c->copygc_wait -
+ atomic64_read(&c->io_clock[WRITE].now)) << 9);
+ prt_newline(out);
+
+ prt_printf(out, "Currently waiting since: ");
+ prt_human_readable_u64(out, max(0LL,
+ atomic64_read(&c->io_clock[WRITE].now) -
+ c->copygc_wait_at) << 9);
+ prt_newline(out);
+
+ prt_printf(out, "Currently calculated wait: ");
+ prt_human_readable_u64(out, bch2_copygc_wait_amount(c));
+ prt_newline(out);
+}
+
+static int bch2_copygc_thread(void *arg)
+{
+ struct bch_fs *c = arg;
+ struct btree_trans *trans;
+ struct moving_context ctxt;
+ struct bch_move_stats move_stats;
+ struct io_clock *clock = &c->io_clock[WRITE];
+ struct buckets_in_flight buckets;
+ u64 last, wait;
+ int ret = 0;
+
+ memset(&buckets, 0, sizeof(buckets));
+
+ ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
+ if (ret) {
+ bch_err_msg(c, ret, "allocating copygc buckets in flight");
+ return ret;
+ }
+
+ set_freezable();
+ trans = bch2_trans_get(c);
+
+ bch2_move_stats_init(&move_stats, "copygc");
+ bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+ writepoint_ptr(&c->copygc_write_point),
+ false);
+
+ while (!ret && !kthread_should_stop()) {
+ bch2_trans_unlock(trans);
+ cond_resched();
+
+ if (!c->copy_gc_enabled) {
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ kthread_wait_freezable(c->copy_gc_enabled);
+ }
+
+ if (unlikely(freezing(current))) {
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ __refrigerator(false);
+ continue;
+ }
+
+ last = atomic64_read(&clock->now);
+ wait = bch2_copygc_wait_amount(c);
+
+ if (wait > clock->max_slop) {
+ c->copygc_wait_at = last;
+ c->copygc_wait = last + wait;
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ trace_and_count(c, copygc_wait, c, wait, last + wait);
+ bch2_kthread_io_clock_wait(clock, last + wait,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
+ }
+
+ c->copygc_wait = 0;
+
+ c->copygc_running = true;
+ ret = bch2_copygc(trans, &ctxt, &buckets);
+ c->copygc_running = false;
+
+ wake_up(&c->copygc_running_wq);
+ }
+
+ move_buckets_wait(trans, &ctxt, &buckets, true);
+ rhashtable_destroy(&buckets.table);
+ bch2_trans_put(trans);
+ bch2_moving_ctxt_exit(&ctxt);
+
+ return 0;
+}
+
+void bch2_copygc_stop(struct bch_fs *c)
+{
+ if (c->copygc_thread) {
+ kthread_stop(c->copygc_thread);
+ put_task_struct(c->copygc_thread);
+ }
+ c->copygc_thread = NULL;
+}
+
+int bch2_copygc_start(struct bch_fs *c)
+{
+ struct task_struct *t;
+ int ret;
+
+ if (c->copygc_thread)
+ return 0;
+
+ if (c->opts.nochanges)
+ return 0;
+
+ if (bch2_fs_init_fault("copygc_start"))
+ return -ENOMEM;
+
+ t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
+ ret = PTR_ERR_OR_ZERO(t);
+ if (ret) {
+ bch_err_msg(c, ret, "creating copygc thread");
+ return ret;
+ }
+
+ get_task_struct(t);
+
+ c->copygc_thread = t;
+ wake_up_process(c->copygc_thread);
+
+ return 0;
+}
+
+void bch2_fs_copygc_init(struct bch_fs *c)
+{
+ init_waitqueue_head(&c->copygc_running_wq);
+ c->copygc_running = false;
+}
diff --git a/fs/bcachefs/movinggc.h b/fs/bcachefs/movinggc.h
new file mode 100644
index 000000000000..ea181fef5bc9
--- /dev/null
+++ b/fs/bcachefs/movinggc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_MOVINGGC_H
+#define _BCACHEFS_MOVINGGC_H
+
+unsigned long bch2_copygc_wait_amount(struct bch_fs *);
+void bch2_copygc_wait_to_text(struct printbuf *, struct bch_fs *);
+
+void bch2_copygc_stop(struct bch_fs *);
+int bch2_copygc_start(struct bch_fs *);
+void bch2_fs_copygc_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_MOVINGGC_H */
diff --git a/fs/bcachefs/nocow_locking.c b/fs/bcachefs/nocow_locking.c
new file mode 100644
index 000000000000..3c21981a4a1c
--- /dev/null
+++ b/fs/bcachefs/nocow_locking.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_methods.h"
+#include "nocow_locking.h"
+#include "util.h"
+
+#include <linux/closure.h>
+
+bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, struct bpos bucket)
+{
+ u64 dev_bucket = bucket_to_u64(bucket);
+ struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(l->b); i++)
+ if (l->b[i] == dev_bucket && atomic_read(&l->l[i]))
+ return true;
+ return false;
+}
+
+#define sign(v) ((v) < 0 ? -1 : (v) > 0 ? 1 : 0)
+
+void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, struct bpos bucket, int flags)
+{
+ u64 dev_bucket = bucket_to_u64(bucket);
+ struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
+ int lock_val = flags ? 1 : -1;
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(l->b); i++)
+ if (l->b[i] == dev_bucket) {
+ int v = atomic_sub_return(lock_val, &l->l[i]);
+
+ BUG_ON(v && sign(v) != lock_val);
+ if (!v)
+ closure_wake_up(&l->wait);
+ return;
+ }
+
+ BUG();
+}
+
+bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *l,
+ u64 dev_bucket, int flags)
+{
+ int v, lock_val = flags ? 1 : -1;
+ unsigned i;
+
+ spin_lock(&l->lock);
+
+ for (i = 0; i < ARRAY_SIZE(l->b); i++)
+ if (l->b[i] == dev_bucket)
+ goto got_entry;
+
+ for (i = 0; i < ARRAY_SIZE(l->b); i++)
+ if (!atomic_read(&l->l[i])) {
+ l->b[i] = dev_bucket;
+ goto take_lock;
+ }
+fail:
+ spin_unlock(&l->lock);
+ return false;
+got_entry:
+ v = atomic_read(&l->l[i]);
+ if (lock_val > 0 ? v < 0 : v > 0)
+ goto fail;
+take_lock:
+ v = atomic_read(&l->l[i]);
+ /* Overflow? */
+ if (v && sign(v + lock_val) != sign(v))
+ goto fail;
+
+ atomic_add(lock_val, &l->l[i]);
+ spin_unlock(&l->lock);
+ return true;
+}
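+
+/*
+ * The two lock classes share one counter per slot: update locks (flags set,
+ * lock_val = 1) make it positive, read locks (lock_val = -1) make it
+ * negative, and the two sides exclude each other - e.g. a trylock for
+ * update fails above while the slot's count is negative.
+ */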
+
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct nocow_lock_bucket *l,
+ u64 dev_bucket, int flags)
+{
+ if (!__bch2_bucket_nocow_trylock(l, dev_bucket, flags)) {
+ struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
+ u64 start_time = local_clock();
+
+ __closure_wait_event(&l->wait, __bch2_bucket_nocow_trylock(l, dev_bucket, flags));
+ bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
+ }
+}
+
+void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
+{
+ unsigned i, nr_zero = 0;
+ struct nocow_lock_bucket *l;
+
+ for (l = t->l; l < t->l + ARRAY_SIZE(t->l); l++) {
+ unsigned v = 0;
+
+ for (i = 0; i < ARRAY_SIZE(l->l); i++)
+ v |= atomic_read(&l->l[i]);
+
+ if (!v) {
+ nr_zero++;
+ continue;
+ }
+
+ if (nr_zero)
+ prt_printf(out, "(%u empty entries)\n", nr_zero);
+ nr_zero = 0;
+
+ for (i = 0; i < ARRAY_SIZE(l->l); i++) {
+ int v = atomic_read(&l->l[i]);
+ if (v) {
+ bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
+ prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
+ }
+ }
+ prt_newline(out);
+ }
+
+ if (nr_zero)
+ prt_printf(out, "(%u empty entries)\n", nr_zero);
+}
+
+void bch2_fs_nocow_locking_exit(struct bch_fs *c)
+{
+ struct bucket_nocow_lock_table *t = &c->nocow_locks;
+
+ for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
+ for (unsigned j = 0; j < ARRAY_SIZE(l->l); j++)
+ BUG_ON(atomic_read(&l->l[j]));
+}
+
+int bch2_fs_nocow_locking_init(struct bch_fs *c)
+{
+ struct bucket_nocow_lock_table *t = &c->nocow_locks;
+
+ for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
+ spin_lock_init(&l->lock);
+
+ return 0;
+}
diff --git a/fs/bcachefs/nocow_locking.h b/fs/bcachefs/nocow_locking.h
new file mode 100644
index 000000000000..f9d6a426a960
--- /dev/null
+++ b/fs/bcachefs/nocow_locking.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_NOCOW_LOCKING_H
+#define _BCACHEFS_NOCOW_LOCKING_H
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "nocow_locking_types.h"
+
+#include <linux/hash.h>
+
+static inline struct nocow_lock_bucket *bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ u64 dev_bucket)
+{
+ unsigned h = hash_64(dev_bucket, BUCKET_NOCOW_LOCKS_BITS);
+
+ return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
+}
+
+#define BUCKET_NOCOW_LOCK_UPDATE (1 << 0)
+
+bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *, struct bpos);
+void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *, struct bpos, int);
+bool __bch2_bucket_nocow_trylock(struct nocow_lock_bucket *, u64, int);
+void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *,
+ struct nocow_lock_bucket *, u64, int);
+
+static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ u64 dev_bucket = bucket_to_u64(bucket);
+ struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
+
+ __bch2_bucket_nocow_lock(t, l, dev_bucket, flags);
+}
+
+static inline bool bch2_bucket_nocow_trylock(struct bucket_nocow_lock_table *t,
+ struct bpos bucket, int flags)
+{
+ u64 dev_bucket = bucket_to_u64(bucket);
+ struct nocow_lock_bucket *l = bucket_nocow_lock(t, dev_bucket);
+
+ return __bch2_bucket_nocow_trylock(l, dev_bucket, flags);
+}
+
+void bch2_nocow_locks_to_text(struct printbuf *, struct bucket_nocow_lock_table *);
+
+void bch2_fs_nocow_locking_exit(struct bch_fs *);
+int bch2_fs_nocow_locking_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_NOCOW_LOCKING_H */
diff --git a/fs/bcachefs/nocow_locking_types.h b/fs/bcachefs/nocow_locking_types.h
new file mode 100644
index 000000000000..bd12bf677924
--- /dev/null
+++ b/fs/bcachefs/nocow_locking_types.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_NOCOW_LOCKING_TYPES_H
+#define _BCACHEFS_NOCOW_LOCKING_TYPES_H
+
+#define BUCKET_NOCOW_LOCKS_BITS 10
+#define BUCKET_NOCOW_LOCKS (1U << BUCKET_NOCOW_LOCKS_BITS)
+
+struct nocow_lock_bucket {
+ struct closure_waitlist wait;
+ spinlock_t lock;
+ u64 b[4];
+ atomic_t l[4];
+} __aligned(SMP_CACHE_BYTES);
+
+struct bucket_nocow_lock_table {
+ struct nocow_lock_bucket l[BUCKET_NOCOW_LOCKS];
+};
+
+#endif /* _BCACHEFS_NOCOW_LOCKING_TYPES_H */
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
new file mode 100644
index 000000000000..232f50c73a94
--- /dev/null
+++ b/fs/bcachefs/opts.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/kernel.h>
+
+#include "bcachefs.h"
+#include "compress.h"
+#include "disk_groups.h"
+#include "error.h"
+#include "opts.h"
+#include "super-io.h"
+#include "util.h"
+
+#define x(t, n, ...) [n] = #t,
+
+const char * const bch2_iops_measurements[] = {
+ BCH_IOPS_MEASUREMENTS()
+ NULL
+};
+
+const char * const bch2_error_actions[] = {
+ BCH_ERROR_ACTIONS()
+ NULL
+};
+
+const char * const bch2_fsck_fix_opts[] = {
+ BCH_FIX_ERRORS_OPTS()
+ NULL
+};
+
+const char * const bch2_version_upgrade_opts[] = {
+ BCH_VERSION_UPGRADE_OPTS()
+ NULL
+};
+
+const char * const bch2_sb_features[] = {
+ BCH_SB_FEATURES()
+ NULL
+};
+
+const char * const bch2_sb_compat[] = {
+ BCH_SB_COMPAT()
+ NULL
+};
+
+const char * const bch2_btree_ids[] = {
+ BCH_BTREE_IDS()
+ "interior btree node",
+ NULL
+};
+
+const char * const bch2_csum_types[] = {
+ BCH_CSUM_TYPES()
+ NULL
+};
+
+const char * const bch2_csum_opts[] = {
+ BCH_CSUM_OPTS()
+ NULL
+};
+
+const char * const bch2_compression_types[] = {
+ BCH_COMPRESSION_TYPES()
+ NULL
+};
+
+const char * const bch2_compression_opts[] = {
+ BCH_COMPRESSION_OPTS()
+ NULL
+};
+
+const char * const bch2_str_hash_types[] = {
+ BCH_STR_HASH_TYPES()
+ NULL
+};
+
+const char * const bch2_str_hash_opts[] = {
+ BCH_STR_HASH_OPTS()
+ NULL
+};
+
+const char * const bch2_data_types[] = {
+ BCH_DATA_TYPES()
+ NULL
+};
+
+const char * const bch2_member_states[] = {
+ BCH_MEMBER_STATES()
+ NULL
+};
+
+const char * const bch2_jset_entry_types[] = {
+ BCH_JSET_ENTRY_TYPES()
+ NULL
+};
+
+const char * const bch2_fs_usage_types[] = {
+ BCH_FS_USAGE_TYPES()
+ NULL
+};
+
+#undef x
+
+static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
+ struct printbuf *err)
+{
+ if (!val) {
+ *res = FSCK_FIX_yes;
+ } else {
+ int ret = match_string(bch2_fsck_fix_opts, -1, val);
+
+ if (ret < 0 && err)
+ prt_str(err, "fix_errors: invalid selection");
+ if (ret < 0)
+ return ret;
+ *res = ret;
+ }
+
+ return 0;
+}
+
+static void bch2_opt_fix_errors_to_text(struct printbuf *out,
+ struct bch_fs *c,
+ struct bch_sb *sb,
+ u64 v)
+{
+ prt_str(out, bch2_fsck_fix_opts[v]);
+}
+
+#define bch2_opt_fix_errors (struct bch_opt_fn) { \
+ .parse = bch2_opt_fix_errors_parse, \
+ .to_text = bch2_opt_fix_errors_to_text, \
+}
+
+const char * const bch2_d_types[BCH_DT_MAX] = {
+ [DT_UNKNOWN] = "unknown",
+ [DT_FIFO] = "fifo",
+ [DT_CHR] = "chr",
+ [DT_DIR] = "dir",
+ [DT_BLK] = "blk",
+ [DT_REG] = "reg",
+ [DT_LNK] = "lnk",
+ [DT_SOCK] = "sock",
+ [DT_WHT] = "whiteout",
+ [DT_SUBVOL] = "subvol",
+};
+
+u64 BCH2_NO_SB_OPT(const struct bch_sb *sb)
+{
+ BUG();
+}
+
+void SET_BCH2_NO_SB_OPT(struct bch_sb *sb, u64 v)
+{
+ BUG();
+}
+
+void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
+{
+#define x(_name, ...) \
+ if (opt_defined(src, _name)) \
+ opt_set(*dst, _name, src._name);
+
+ BCH_OPTS()
+#undef x
+}
+
+bool bch2_opt_defined_by_id(const struct bch_opts *opts, enum bch_opt_id id)
+{
+ switch (id) {
+#define x(_name, ...) \
+ case Opt_##_name: \
+ return opt_defined(*opts, _name);
+ BCH_OPTS()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+u64 bch2_opt_get_by_id(const struct bch_opts *opts, enum bch_opt_id id)
+{
+ switch (id) {
+#define x(_name, ...) \
+ case Opt_##_name: \
+ return opts->_name;
+ BCH_OPTS()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+void bch2_opt_set_by_id(struct bch_opts *opts, enum bch_opt_id id, u64 v)
+{
+ switch (id) {
+#define x(_name, ...) \
+ case Opt_##_name: \
+ opt_set(*opts, _name, v); \
+ break;
+ BCH_OPTS()
+#undef x
+ default:
+ BUG();
+ }
+}
+
+const struct bch_option bch2_opt_table[] = {
+#define OPT_BOOL() .type = BCH_OPT_BOOL, .min = 0, .max = 2
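+/*
+ * Note: .max is an exclusive bound - bch2_opt_validate() rejects v >= max -
+ * which is why OPT_BOOL() above uses max = 2 to allow the values 0 and 1.
+ */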
+#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, \
+ .min = _min, .max = _max
+#define OPT_STR(_choices) .type = BCH_OPT_STR, \
+ .min = 0, .max = ARRAY_SIZE(_choices), \
+ .choices = _choices
+#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
+
+#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
+ [Opt_##_name] = { \
+ .attr = { \
+ .name = #_name, \
+ .mode = (_flags) & OPT_RUNTIME ? 0644 : 0444, \
+ }, \
+ .flags = _flags, \
+ .hint = _hint, \
+ .help = _help, \
+ .get_sb = _sb_opt, \
+ .set_sb = SET_##_sb_opt, \
+ _type \
+ },
+
+ BCH_OPTS()
+#undef x
+};
+
+int bch2_opt_lookup(const char *name)
+{
+ const struct bch_option *i;
+
+ for (i = bch2_opt_table;
+ i < bch2_opt_table + ARRAY_SIZE(bch2_opt_table);
+ i++)
+ if (!strcmp(name, i->attr.name))
+ return i - bch2_opt_table;
+
+ return -1;
+}
+
+struct synonym {
+ const char *s1, *s2;
+};
+
+static const struct synonym bch_opt_synonyms[] = {
+ { "quota", "usrquota" },
+};
+
+static int bch2_mount_opt_lookup(const char *name)
+{
+ const struct synonym *i;
+
+ for (i = bch_opt_synonyms;
+ i < bch_opt_synonyms + ARRAY_SIZE(bch_opt_synonyms);
+ i++)
+ if (!strcmp(name, i->s1))
+ name = i->s2;
+
+ return bch2_opt_lookup(name);
+}
+
+int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err)
+{
+ if (v < opt->min) {
+ if (err)
+ prt_printf(err, "%s: too small (min %llu)",
+ opt->attr.name, opt->min);
+ return -ERANGE;
+ }
+
+ if (opt->max && v >= opt->max) {
+ if (err)
+ prt_printf(err, "%s: too big (max %llu)",
+ opt->attr.name, opt->max);
+ return -ERANGE;
+ }
+
+ if ((opt->flags & OPT_SB_FIELD_SECTORS) && (v & 511)) {
+ if (err)
+ prt_printf(err, "%s: not a multiple of 512",
+ opt->attr.name);
+ return -EINVAL;
+ }
+
+ if ((opt->flags & OPT_MUST_BE_POW_2) && !is_power_of_2(v)) {
+ if (err)
+ prt_printf(err, "%s: must be a power of two",
+ opt->attr.name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bch2_opt_parse(struct bch_fs *c,
+ const struct bch_option *opt,
+ const char *val, u64 *res,
+ struct printbuf *err)
+{
+ ssize_t ret;
+
+ switch (opt->type) {
+ case BCH_OPT_BOOL:
+ if (val) {
+ ret = kstrtou64(val, 10, res);
+ } else {
+ ret = 0;
+ *res = 1;
+ }
+
+ if (ret < 0 || (*res != 0 && *res != 1)) {
+ if (err)
+ prt_printf(err, "%s: must be bool", opt->attr.name);
+ return ret < 0 ? ret : -EINVAL;
+ }
+ break;
+ case BCH_OPT_UINT:
+ if (!val) {
+ if (err)
+ prt_printf(err, "%s: required value",
+ opt->attr.name);
+ return -EINVAL;
+ }
+
+ ret = opt->flags & OPT_HUMAN_READABLE
+ ? bch2_strtou64_h(val, res)
+ : kstrtou64(val, 10, res);
+ if (ret < 0) {
+ if (err)
+ prt_printf(err, "%s: must be a number",
+ opt->attr.name);
+ return ret;
+ }
+ break;
+ case BCH_OPT_STR:
+ if (!val) {
+ if (err)
+ prt_printf(err, "%s: required value",
+ opt->attr.name);
+ return -EINVAL;
+ }
+
+ ret = match_string(opt->choices, -1, val);
+ if (ret < 0) {
+ if (err)
+ prt_printf(err, "%s: invalid selection",
+ opt->attr.name);
+ return ret;
+ }
+
+ *res = ret;
+ break;
+ case BCH_OPT_FN:
+ ret = opt->fn.parse(c, val, res, err);
+ if (ret < 0) {
+ if (err)
+ prt_printf(err, "%s: parse error",
+ opt->attr.name);
+ return ret;
+ }
+ }
+
+ return bch2_opt_validate(opt, *res, err);
+}
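+
+/*
+ * Example (illustrative) - parsing a single option by name, as the mount
+ * path below does:
+ *
+ *	int id = bch2_opt_lookup("metadata_replicas");
+ *	u64 v;
+ *
+ *	ret = bch2_opt_parse(c, &bch2_opt_table[id], "2", &v, &err);
+ */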
+
+void bch2_opt_to_text(struct printbuf *out,
+ struct bch_fs *c, struct bch_sb *sb,
+ const struct bch_option *opt, u64 v,
+ unsigned flags)
+{
+ if (flags & OPT_SHOW_MOUNT_STYLE) {
+ if (opt->type == BCH_OPT_BOOL) {
+ prt_printf(out, "%s%s",
+ v ? "" : "no",
+ opt->attr.name);
+ return;
+ }
+
+ prt_printf(out, "%s=", opt->attr.name);
+ }
+
+ switch (opt->type) {
+ case BCH_OPT_BOOL:
+ case BCH_OPT_UINT:
+ if (opt->flags & OPT_HUMAN_READABLE)
+ prt_human_readable_u64(out, v);
+ else
+ prt_printf(out, "%lli", v);
+ break;
+ case BCH_OPT_STR:
+ if (flags & OPT_SHOW_FULL_LIST)
+ prt_string_option(out, opt->choices, v);
+ else
+ prt_str(out, opt->choices[v]);
+ break;
+ case BCH_OPT_FN:
+ opt->fn.to_text(out, c, sb, v);
+ break;
+ default:
+ BUG();
+ }
+}
+
+int bch2_opt_check_may_set(struct bch_fs *c, int id, u64 v)
+{
+ int ret = 0;
+
+ switch (id) {
+ case Opt_compression:
+ case Opt_background_compression:
+ ret = bch2_check_set_has_compressed_data(c, v);
+ break;
+ case Opt_erasure_code:
+ if (v)
+ bch2_check_set_feature(c, BCH_FEATURE_ec);
+ break;
+ }
+
+ return ret;
+}
+
+int bch2_opts_check_may_set(struct bch_fs *c)
+{
+ unsigned i;
+ int ret;
+
+ for (i = 0; i < bch2_opts_nr; i++) {
+ ret = bch2_opt_check_may_set(c, i,
+ bch2_opt_get_by_id(&c->opts, i));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
+ char *options)
+{
+ char *copied_opts, *copied_opts_start;
+ char *opt, *name, *val;
+ int ret, id;
+ struct printbuf err = PRINTBUF;
+ u64 v;
+
+ if (!options)
+ return 0;
+
+ /*
+ * sys_fsconfig() is now occasionally providing us with option lists
+ * starting with a comma - weird.
+ */
+ if (*options == ',')
+ options++;
+
+ copied_opts = kstrdup(options, GFP_KERNEL);
+ if (!copied_opts)
+ return -1;
+ copied_opts_start = copied_opts;
+
+ while ((opt = strsep(&copied_opts, ",")) != NULL) {
+ name = strsep(&opt, "=");
+ val = opt;
+
+ id = bch2_mount_opt_lookup(name);
+
+ /* Check for the form "noopt", negation of a boolean opt: */
+ if (id < 0 &&
+ !val &&
+ !strncmp("no", name, 2)) {
+ id = bch2_mount_opt_lookup(name + 2);
+ val = "0";
+ }
+
+ /* Unknown options are ignored: */
+ if (id < 0)
+ continue;
+
+ if (!(bch2_opt_table[id].flags & OPT_MOUNT))
+ goto bad_opt;
+
+ if (id == Opt_acl &&
+ !IS_ENABLED(CONFIG_BCACHEFS_POSIX_ACL))
+ goto bad_opt;
+
+ if ((id == Opt_usrquota ||
+ id == Opt_grpquota) &&
+ !IS_ENABLED(CONFIG_BCACHEFS_QUOTA))
+ goto bad_opt;
+
+ ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err);
+ if (ret < 0)
+ goto bad_val;
+
+ bch2_opt_set_by_id(opts, id, v);
+ }
+
+ ret = 0;
+ goto out;
+
+bad_opt:
+ pr_err("Bad mount option %s", name);
+ ret = -1;
+ goto out;
+bad_val:
+ pr_err("Invalid mount option %s", err.buf);
+ ret = -1;
+ goto out;
+out:
+ kfree(copied_opts_start);
+ printbuf_exit(&err);
+ return ret;
+}
+
+u64 bch2_opt_from_sb(struct bch_sb *sb, enum bch_opt_id id)
+{
+ const struct bch_option *opt = bch2_opt_table + id;
+ u64 v;
+
+ v = opt->get_sb(sb);
+
+ if (opt->flags & OPT_SB_FIELD_ILOG2)
+ v = 1ULL << v;
+
+ if (opt->flags & OPT_SB_FIELD_SECTORS)
+ v <<= 9;
+
+ return v;
+}
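+
+/*
+ * Example: encoded_extent_max has both OPT_SB_FIELD_ILOG2 and
+ * OPT_SB_FIELD_SECTORS set, so a superblock value of 7 decodes to
+ * (1ULL << 7) << 9 = 64k bytes.
+ */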
+
+/*
+ * Initial options from superblock - here we don't want any options undefined,
+ * any options the superblock doesn't specify are set to 0:
+ */
+int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
+{
+ unsigned id;
+
+ for (id = 0; id < bch2_opts_nr; id++) {
+ const struct bch_option *opt = bch2_opt_table + id;
+
+ if (opt->get_sb == BCH2_NO_SB_OPT)
+ continue;
+
+ bch2_opt_set_by_id(opts, id, bch2_opt_from_sb(sb, id));
+ }
+
+ return 0;
+}
+
+void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
+{
+ if (opt->set_sb == SET_BCH2_NO_SB_OPT)
+ return;
+
+ if (opt->flags & OPT_SB_FIELD_SECTORS)
+ v >>= 9;
+
+ if (opt->flags & OPT_SB_FIELD_ILOG2)
+ v = ilog2(v);
+
+ opt->set_sb(sb, v);
+}
+
+void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
+{
+ if (opt->set_sb == SET_BCH2_NO_SB_OPT)
+ return;
+
+ mutex_lock(&c->sb_lock);
+ __bch2_opt_set_sb(c->disk_sb.sb, opt, v);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+}
+
+/* io opts: */
+
+struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src)
+{
+ return (struct bch_io_opts) {
+#define x(_name, _bits) ._name = src._name,
+ BCH_INODE_OPTS()
+#undef x
+ };
+}
+
+bool bch2_opt_is_inode_opt(enum bch_opt_id id)
+{
+ static const enum bch_opt_id inode_opt_list[] = {
+#define x(_name, _bits) Opt_##_name,
+ BCH_INODE_OPTS()
+#undef x
+ };
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(inode_opt_list); i++)
+ if (inode_opt_list[i] == id)
+ return true;
+
+ return false;
+}
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
new file mode 100644
index 000000000000..55014336c5f7
--- /dev/null
+++ b/fs/bcachefs/opts.h
@@ -0,0 +1,564 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_OPTS_H
+#define _BCACHEFS_OPTS_H
+
+#include <linux/bug.h>
+#include <linux/log2.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "bcachefs_format.h"
+
+struct bch_fs;
+
+extern const char * const bch2_iops_measurements[];
+extern const char * const bch2_error_actions[];
+extern const char * const bch2_fsck_fix_opts[];
+extern const char * const bch2_version_upgrade_opts[];
+extern const char * const bch2_sb_features[];
+extern const char * const bch2_sb_compat[];
+extern const char * const bch2_btree_ids[];
+extern const char * const bch2_csum_types[];
+extern const char * const bch2_csum_opts[];
+extern const char * const bch2_compression_types[];
+extern const char * const bch2_compression_opts[];
+extern const char * const bch2_str_hash_types[];
+extern const char * const bch2_str_hash_opts[];
+extern const char * const bch2_data_types[];
+extern const char * const bch2_member_states[];
+extern const char * const bch2_jset_entry_types[];
+extern const char * const bch2_fs_usage_types[];
+extern const char * const bch2_d_types[];
+
+static inline const char *bch2_d_type_str(unsigned d_type)
+{
+ return (d_type < BCH_DT_MAX ? bch2_d_types[d_type] : NULL) ?: "(bad d_type)";
+}
+
+/*
+ * Mount options; we also store defaults in the superblock.
+ *
+ * Also exposed via sysfs: if an option is writeable, and it's also stored in
+ * the superblock, changing it via sysfs (currently? might change this) also
+ * updates the superblock.
+ *
+ * We store each option with an accompanying "defined" bit. This means we can
+ * pass the mount options to bch2_fs_alloc() as a whole struct, and then only
+ * apply the options from that struct that are defined.
+ */
+
+/* dummy option, for options that aren't stored in the superblock */
+u64 BCH2_NO_SB_OPT(const struct bch_sb *);
+void SET_BCH2_NO_SB_OPT(struct bch_sb *, u64);
+
+/* When can be set: */
+enum opt_flags {
+ OPT_FS = (1 << 0), /* Filesystem option */
+ OPT_DEVICE = (1 << 1), /* Device option */
+ OPT_INODE = (1 << 2), /* Inode option */
+ OPT_FORMAT = (1 << 3), /* May be specified at format time */
+ OPT_MOUNT = (1 << 4), /* May be specified at mount time */
+ OPT_RUNTIME = (1 << 5), /* May be specified at runtime */
+ OPT_HUMAN_READABLE = (1 << 6),
+ OPT_MUST_BE_POW_2 = (1 << 7), /* Must be power of 2 */
+ OPT_SB_FIELD_SECTORS = (1 << 8),/* Superblock field is >> 9 of actual value */
+ OPT_SB_FIELD_ILOG2 = (1 << 9), /* Superblock field is ilog2 of actual value */
+};
+
+enum opt_type {
+ BCH_OPT_BOOL,
+ BCH_OPT_UINT,
+ BCH_OPT_STR,
+ BCH_OPT_FN,
+};
+
+struct bch_opt_fn {
+ int (*parse)(struct bch_fs *, const char *, u64 *, struct printbuf *);
+ void (*to_text)(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+};
+
+/**
+ * x(name, bits, flags, type, sb_opt, default, hint, help)
+ *
+ * @name - name of mount option, sysfs attribute, and struct bch_opts
+ * member
+ *
+ * @bits - width of the in-memory struct bch_opts member
+ *
+ * @flags - when the option may be set (OPT_FS, OPT_MOUNT, OPT_RUNTIME, ...)
+ *
+ * @type - one of OPT_BOOL, OPT_UINT, OPT_STR, OPT_FN
+ *
+ * @sb_opt - name of corresponding superblock option
+ *
+ * @default - default value
+ *
+ * @hint, @help - value hint and help text, shown in usage and sysfs
+ */
+
+#ifdef __KERNEL__
+#define RATELIMIT_ERRORS_DEFAULT true
+#else
+#define RATELIMIT_ERRORS_DEFAULT false
+#endif
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+#define BCACHEFS_VERBOSE_DEFAULT true
+#else
+#define BCACHEFS_VERBOSE_DEFAULT false
+#endif
+
+#define BCH_FIX_ERRORS_OPTS() \
+ x(exit, 0) \
+ x(yes, 1) \
+ x(no, 2) \
+ x(ask, 3)
+
+enum fsck_err_opts {
+#define x(t, n) FSCK_FIX_##t,
+ BCH_FIX_ERRORS_OPTS()
+#undef x
+};
+
+#define BCH_OPTS() \
+ x(block_size, u16, \
+ OPT_FS|OPT_FORMAT| \
+ OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
+ OPT_UINT(512, 1U << 16), \
+ BCH_SB_BLOCK_SIZE, 8, \
+ "size", NULL) \
+ x(btree_node_size, u32, \
+ OPT_FS|OPT_FORMAT| \
+ OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
+ OPT_UINT(512, 1U << 20), \
+ BCH_SB_BTREE_NODE_SIZE, 512, \
+ "size", "Btree node size, default 256k") \
+ x(errors, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_STR(bch2_error_actions), \
+ BCH_SB_ERROR_ACTION, BCH_ON_ERROR_ro, \
+ NULL, "Action to take on filesystem error") \
+ x(metadata_replicas, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(1, BCH_REPLICAS_MAX), \
+ BCH_SB_META_REPLICAS_WANT, 1, \
+ "#", "Number of metadata replicas") \
+ x(data_replicas, u8, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(1, BCH_REPLICAS_MAX), \
+ BCH_SB_DATA_REPLICAS_WANT, 1, \
+ "#", "Number of data replicas") \
+ x(metadata_replicas_required, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_UINT(1, BCH_REPLICAS_MAX), \
+ BCH_SB_META_REPLICAS_REQ, 1, \
+ "#", NULL) \
+ x(data_replicas_required, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_UINT(1, BCH_REPLICAS_MAX), \
+ BCH_SB_DATA_REPLICAS_REQ, 1, \
+ "#", NULL) \
+ x(encoded_extent_max, u32, \
+ OPT_FS|OPT_FORMAT| \
+ OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS|OPT_SB_FIELD_ILOG2,\
+ OPT_UINT(4096, 2U << 20), \
+ BCH_SB_ENCODED_EXTENT_MAX_BITS, 64 << 10, \
+ "size", "Maximum size of checksummed/compressed extents")\
+ x(metadata_checksum, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_STR(bch2_csum_opts), \
+ BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
+ NULL, NULL) \
+ x(data_checksum, u8, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_STR(bch2_csum_opts), \
+ BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
+ NULL, NULL) \
+ x(compression, u8, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FN(bch2_opt_compression), \
+ BCH_SB_COMPRESSION_TYPE, BCH_COMPRESSION_OPT_none, \
+ NULL, NULL) \
+ x(background_compression, u8, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FN(bch2_opt_compression), \
+ BCH_SB_BACKGROUND_COMPRESSION_TYPE,BCH_COMPRESSION_OPT_none, \
+ NULL, NULL) \
+ x(str_hash, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_STR(bch2_str_hash_opts), \
+ BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_siphash, \
+ NULL, "Hash function for directory entries and xattrs")\
+ x(metadata_target, u16, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FN(bch2_opt_target), \
+ BCH_SB_METADATA_TARGET, 0, \
+ "(target)", "Device or label for metadata writes") \
+ x(foreground_target, u16, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FN(bch2_opt_target), \
+ BCH_SB_FOREGROUND_TARGET, 0, \
+ "(target)", "Device or label for foreground writes") \
+ x(background_target, u16, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FN(bch2_opt_target), \
+ BCH_SB_BACKGROUND_TARGET, 0, \
+ "(target)", "Device or label to move data to in the background")\
+ x(promote_target, u16, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_FN(bch2_opt_target), \
+ BCH_SB_PROMOTE_TARGET, 0, \
+ "(target)", "Device or label to promote data to on read") \
+ x(erasure_code, u16, \
+ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_ERASURE_CODE, false, \
+ NULL, "Enable erasure coding (DO NOT USE YET)") \
+ x(inodes_32bit, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_INODE_32BIT, true, \
+ NULL, "Constrain inode numbers to 32 bits") \
+ x(shard_inode_numbers, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_SHARD_INUMS, true, \
+ NULL, "Shard new inode numbers by CPU id") \
+ x(inodes_use_key_cache, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH_SB_INODES_USE_KEY_CACHE, true, \
+ NULL, "Use the btree key cache for the inodes btree") \
+ x(btree_node_mem_ptr_optimization, u8, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, true, \
+ NULL, "Stash pointer to in memory btree node in btree ptr")\
+ x(btree_write_buffer_size, u32, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_UINT(16, (1U << 20) - 1), \
+ BCH2_NO_SB_OPT, 1U << 13, \
+ NULL, "Number of btree write buffer entries") \
+ x(gc_reserve_percent, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(5, 21), \
+ BCH_SB_GC_RESERVE, 8, \
+ "%", "Percentage of disk space to reserve for copygc")\
+ x(gc_reserve_bytes, u64, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME| \
+ OPT_HUMAN_READABLE|OPT_SB_FIELD_SECTORS, \
+ OPT_UINT(0, U64_MAX), \
+ BCH_SB_GC_RESERVE_BYTES, 0, \
+ "%", "Amount of disk space to reserve for copygc\n" \
+ "Takes precedence over gc_reserve_percent if set")\
+ x(root_reserve_percent, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_UINT(0, 100), \
+ BCH_SB_ROOT_RESERVE, 0, \
+ "%", "Percentage of disk space to reserve for superuser")\
+ x(wide_macs, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_128_BIT_MACS, false, \
+ NULL, "Store full 128 bits of cryptographic MACs, instead of 80")\
+ x(inline_data, u8, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, true, \
+ NULL, "Enable inline data extents") \
+ x(acl, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH_SB_POSIX_ACL, true, \
+ NULL, "Enable POSIX acls") \
+ x(usrquota, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH_SB_USRQUOTA, false, \
+ NULL, "Enable user quotas") \
+ x(grpquota, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH_SB_GRPQUOTA, false, \
+ NULL, "Enable group quotas") \
+ x(prjquota, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH_SB_PRJQUOTA, false, \
+ NULL, "Enable project quotas") \
+ x(degraded, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Allow mounting in degraded mode") \
+ x(very_degraded, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Allow mounting in when data will be missing") \
+ x(discard, u8, \
+ OPT_FS|OPT_MOUNT|OPT_DEVICE, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, true, \
+ NULL, "Enable discard/TRIM support") \
+ x(verbose, u8, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, BCACHEFS_VERBOSE_DEFAULT, \
+ NULL, "Extra debugging information during mount/recovery")\
+ x(journal_flush_delay, u32, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(1, U32_MAX), \
+ BCH_SB_JOURNAL_FLUSH_DELAY, 1000, \
+ NULL, "Delay in milliseconds before automatic journal commits")\
+ x(journal_flush_disabled, u8, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_JOURNAL_FLUSH_DISABLED,false, \
+ NULL, "Disable journal flush on sync/fsync\n" \
+ "If enabled, writes can be lost, but only since the\n"\
+ "last journal write (default 1 second)") \
+ x(journal_reclaim_delay, u32, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(0, U32_MAX), \
+ BCH_SB_JOURNAL_RECLAIM_DELAY, 100, \
+ NULL, "Delay in milliseconds before automatic journal reclaim")\
+ x(move_bytes_in_flight, u32, \
+ OPT_HUMAN_READABLE|OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(1024, U32_MAX), \
+ BCH2_NO_SB_OPT, 1U << 20, \
+ NULL, "Maximum Amount of IO to keep in flight by the move path")\
+ x(move_ios_in_flight, u32, \
+ OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(1, 1024), \
+ BCH2_NO_SB_OPT, 32, \
+ NULL, "Maximum number of IOs to keep in flight by the move path")\
+ x(fsck, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Run fsck on mount") \
+ x(fix_errors, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_FN(bch2_opt_fix_errors), \
+ BCH2_NO_SB_OPT, FSCK_FIX_exit, \
+ NULL, "Fix errors during fsck without asking") \
+ x(ratelimit_errors, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, RATELIMIT_ERRORS_DEFAULT, \
+ NULL, "Ratelimit error messages during fsck") \
+ x(nochanges, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Super read only mode - no writes at all will be issued,\n"\
+ "even if we have to replay the journal") \
+ x(norecovery, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Don't replay the journal") \
+ x(keep_journal, u8, \
+ 0, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Don't free journal entries/keys after startup")\
+ x(read_entire_journal, u8, \
+ 0, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Read all journal entries, not just dirty ones")\
+ x(read_journal_only, u8, \
+ 0, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Only read the journal, skip the rest of recovery")\
+ x(journal_transaction_names, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_JOURNAL_TRANSACTION_NAMES, true, \
+ NULL, "Log transaction function names in journal") \
+ x(noexcl, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Don't open device in exclusive mode") \
+ x(direct_io, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, true, \
+ NULL, "Use O_DIRECT (userspace only)") \
+ x(sb, u64, \
+ OPT_MOUNT, \
+ OPT_UINT(0, S64_MAX), \
+ BCH2_NO_SB_OPT, BCH_SB_SECTOR, \
+ "offset", "Sector offset of superblock") \
+ x(read_only, u8, \
+ OPT_FS, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, NULL) \
+ x(nostart, u8, \
+ 0, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Don\'t start filesystem, only open devices") \
+ x(reconstruct_alloc, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Reconstruct alloc btree") \
+ x(version_upgrade, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_STR(bch2_version_upgrade_opts), \
+ BCH_SB_VERSION_UPGRADE, BCH_VERSION_UPGRADE_compatible, \
+ NULL, "Set superblock to latest version,\n" \
+ "allowing any new features to be used") \
+ x(buckets_nouse, u8, \
+ 0, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Allocate the buckets_nouse bitmap") \
+ x(project, u8, \
+ OPT_INODE, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, NULL) \
+ x(nocow, u8, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
+ OPT_BOOL(), \
+ BCH_SB_NOCOW, false, \
+ NULL, "Nocow mode: Writes will be done in place when possible.\n"\
+ "Snapshots and reflink will still caused writes to be COW\n"\
+ "Implicitly disables data checksumming, compression and encryption")\
+ x(nocow_enabled, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, true, \
+ NULL, "Enable nocow mode: enables runtime locking in\n"\
+ "data move path needed if nocow will ever be in use\n")\
+ x(no_data_io, u8, \
+ OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH2_NO_SB_OPT, false, \
+ NULL, "Skip submit_bio() for data reads and writes, " \
+ "for performance testing purposes") \
+ x(fs_size, u64, \
+ OPT_DEVICE, \
+ OPT_UINT(0, S64_MAX), \
+ BCH2_NO_SB_OPT, 0, \
+ "size", "Size of filesystem on device") \
+ x(bucket, u32, \
+ OPT_DEVICE, \
+ OPT_UINT(0, S64_MAX), \
+ BCH2_NO_SB_OPT, 0, \
+ "size", "Size of filesystem on device") \
+ x(durability, u8, \
+ OPT_DEVICE, \
+ OPT_UINT(0, BCH_REPLICAS_MAX), \
+ BCH2_NO_SB_OPT, 1, \
+ "n", "Data written to this device will be considered\n"\
+ "to have already been replicated n times")
+
+struct bch_opts {
+#define x(_name, _bits, ...) unsigned _name##_defined:1;
+ BCH_OPTS()
+#undef x
+
+#define x(_name, _bits, ...) _bits _name;
+ BCH_OPTS()
+#undef x
+};
+
+static const __maybe_unused struct bch_opts bch2_opts_default = {
+#define x(_name, _bits, _mode, _type, _sb_opt, _default, ...) \
+ ._name##_defined = true, \
+ ._name = _default, \
+
+ BCH_OPTS()
+#undef x
+};
+
+#define opt_defined(_opts, _name) ((_opts)._name##_defined)
+
+#define opt_get(_opts, _name) \
+ (opt_defined(_opts, _name) ? (_opts)._name : bch2_opts_default._name)
+
+#define opt_set(_opts, _name, _v) \
+do { \
+ (_opts)._name##_defined = true; \
+ (_opts)._name = _v; \
+} while (0)
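+
+/*
+ * Example: build an options struct with only the fields you care about
+ * defined, then overlay it with bch2_opts_apply() - undefined fields are
+ * left untouched:
+ *
+ *	struct bch_opts opts = bch2_opts_empty();
+ *
+ *	opt_set(opts, read_only, true);
+ *	bch2_opts_apply(&c->opts, opts);
+ */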
+
+static inline struct bch_opts bch2_opts_empty(void)
+{
+ return (struct bch_opts) { 0 };
+}
+
+void bch2_opts_apply(struct bch_opts *, struct bch_opts);
+
+enum bch_opt_id {
+#define x(_name, ...) Opt_##_name,
+ BCH_OPTS()
+#undef x
+ bch2_opts_nr
+};
+
+struct bch_fs;
+struct printbuf;
+
+struct bch_option {
+ struct attribute attr;
+ u64 (*get_sb)(const struct bch_sb *);
+ void (*set_sb)(struct bch_sb *, u64);
+ enum opt_type type;
+ enum opt_flags flags;
+ u64 min, max;
+
+ const char * const *choices;
+
+ struct bch_opt_fn fn;
+
+ const char *hint;
+ const char *help;
+
+};
+
+extern const struct bch_option bch2_opt_table[];
+
+bool bch2_opt_defined_by_id(const struct bch_opts *, enum bch_opt_id);
+u64 bch2_opt_get_by_id(const struct bch_opts *, enum bch_opt_id);
+void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
+
+u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id);
+int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
+void __bch2_opt_set_sb(struct bch_sb *, const struct bch_option *, u64);
+void bch2_opt_set_sb(struct bch_fs *, const struct bch_option *, u64);
+
+int bch2_opt_lookup(const char *);
+int bch2_opt_validate(const struct bch_option *, u64, struct printbuf *);
+int bch2_opt_parse(struct bch_fs *, const struct bch_option *,
+ const char *, u64 *, struct printbuf *);
+
+#define OPT_SHOW_FULL_LIST (1 << 0)
+#define OPT_SHOW_MOUNT_STYLE (1 << 1)
+
+void bch2_opt_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *,
+ const struct bch_option *, u64, unsigned);
+
+int bch2_opt_check_may_set(struct bch_fs *, int, u64);
+int bch2_opts_check_may_set(struct bch_fs *);
+int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, char *);
+
+/* inode opts: */
+
+struct bch_io_opts {
+#define x(_name, _bits) u##_bits _name;
+ BCH_INODE_OPTS()
+#undef x
+};
+
+struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts);
+bool bch2_opt_is_inode_opt(enum bch_opt_id);
+
+#endif /* _BCACHEFS_OPTS_H */
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
new file mode 100644
index 000000000000..de41f9a14492
--- /dev/null
+++ b/fs/bcachefs/printbuf.c
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: LGPL-2.1+
+/* Copyright (C) 2022 Kent Overstreet */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+
+#include "printbuf.h"
+
+static inline unsigned printbuf_linelen(struct printbuf *buf)
+{
+ return buf->pos - buf->last_newline;
+}
+
+int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
+{
+ unsigned new_size;
+ char *buf;
+
+ if (!out->heap_allocated)
+ return 0;
+
+ /* Reserved space for terminating nul: */
+ extra += 1;
+
+ if (out->pos + extra < out->size)
+ return 0;
+
+ new_size = roundup_pow_of_two(out->size + extra);
+
+ /*
+ * Note: output buffer must be freeable with kfree(), it's not required
+ * that the user use printbuf_exit().
+ */
+ buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_NOWAIT);
+
+ if (!buf) {
+ out->allocation_failure = true;
+ return -ENOMEM;
+ }
+
+ out->buf = buf;
+ out->size = new_size;
+ return 0;
+}
+
+void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
+{
+ int len;
+
+ do {
+ va_list args2;
+
+ va_copy(args2, args);
+ len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args2);
+ } while (len + 1 >= printbuf_remaining(out) &&
+ !bch2_printbuf_make_room(out, len + 1));
+
+ len = min_t(size_t, len,
+ printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
+ out->pos += len;
+}
+
+void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+
+ do {
+ va_start(args, fmt);
+ len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args);
+ va_end(args);
+ } while (len + 1 >= printbuf_remaining(out) &&
+ !bch2_printbuf_make_room(out, len + 1));
+
+ len = min_t(size_t, len,
+ printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
+ out->pos += len;
+}
+
+/**
+ * bch2_printbuf_str() - returns printbuf's buf as a C string, guaranteed to be
+ * null terminated
+ * @buf: printbuf to terminate
+ * Returns: Printbuf contents, as a nul terminated C string
+ */
+const char *bch2_printbuf_str(const struct printbuf *buf)
+{
+ /*
+ * If we've written to a printbuf then it's guaranteed to be a null
+ * terminated string - but if we haven't, then we might not have
+ * allocated a buffer at all:
+ */
+ return buf->pos
+ ? buf->buf
+ : "";
+}
+
+/**
+ * bch2_printbuf_exit() - exit a printbuf, freeing memory it owns and poisoning it
+ * against accidental use.
+ * @buf: printbuf to exit
+ */
+void bch2_printbuf_exit(struct printbuf *buf)
+{
+ if (buf->heap_allocated) {
+ kfree(buf->buf);
+ buf->buf = ERR_PTR(-EINTR); /* poison value */
+ }
+}
+
+void bch2_printbuf_tabstops_reset(struct printbuf *buf)
+{
+ buf->nr_tabstops = 0;
+}
+
+void bch2_printbuf_tabstop_pop(struct printbuf *buf)
+{
+ if (buf->nr_tabstops)
+ --buf->nr_tabstops;
+}
+
+/**
+ * bch2_printbuf_tabstop_push() - add a tabstop, n spaces from the previous tabstop
+ *
+ * @buf: printbuf to control
+ * @spaces: number of spaces from previous tabstop
+ *
+ * In the future this function may allocate memory if setting more than
+ * PRINTBUF_INLINE_TABSTOPS or setting tabstops more than 255 spaces from start
+ * of line.
+ */
+int bch2_printbuf_tabstop_push(struct printbuf *buf, unsigned spaces)
+{
+ unsigned prev_tabstop = buf->nr_tabstops
+ ? buf->_tabstops[buf->nr_tabstops - 1]
+ : 0;
+
+ if (WARN_ON(buf->nr_tabstops >= ARRAY_SIZE(buf->_tabstops)))
+ return -EINVAL;
+
+ buf->_tabstops[buf->nr_tabstops++] = prev_tabstop + spaces;
+ buf->has_indent_or_tabstops = true;
+ return 0;
+}
+
+/**
+ * bch2_printbuf_indent_add() - add to the current indent level
+ *
+ * @buf: printbuf to control
+ * @spaces: number of spaces to add to the current indent level
+ *
+ * Subsequent lines, and the current line if the output position is at the start
+ * of the current line, will be indented by @spaces more spaces.
+ */
+void bch2_printbuf_indent_add(struct printbuf *buf, unsigned spaces)
+{
+ if (WARN_ON_ONCE(buf->indent + spaces < buf->indent))
+ spaces = 0;
+
+ buf->indent += spaces;
+ prt_chars(buf, ' ', spaces);
+
+ buf->has_indent_or_tabstops = true;
+}
+
+/**
+ * bch2_printbuf_indent_sub() - subtract from the current indent level
+ *
+ * @buf: printbuf to control
+ * @spaces: number of spaces to subtract from the current indent level
+ *
+ * Subsequent lines, and the current line if the output position is at the start
+ * of the current line, will be indented by @spaces fewer spaces.
+ */
+void bch2_printbuf_indent_sub(struct printbuf *buf, unsigned spaces)
+{
+ if (WARN_ON_ONCE(spaces > buf->indent))
+ spaces = buf->indent;
+
+ if (buf->last_newline + buf->indent == buf->pos) {
+ buf->pos -= spaces;
+ printbuf_nul_terminate(buf);
+ }
+ buf->indent -= spaces;
+
+ if (!buf->indent && !buf->nr_tabstops)
+ buf->has_indent_or_tabstops = false;
+}
+
+void bch2_prt_newline(struct printbuf *buf)
+{
+ unsigned i;
+
+ bch2_printbuf_make_room(buf, 1 + buf->indent);
+
+ __prt_char(buf, '\n');
+
+ buf->last_newline = buf->pos;
+
+ for (i = 0; i < buf->indent; i++)
+ __prt_char(buf, ' ');
+
+ printbuf_nul_terminate(buf);
+
+ buf->last_field = buf->pos;
+ buf->cur_tabstop = 0;
+}
+
+/*
+ * Returns the current tabstop, as spaces from the start of the line, or 0 if
+ * no more tabstops are set:
+ */
+static inline unsigned cur_tabstop(struct printbuf *buf)
+{
+ return buf->cur_tabstop < buf->nr_tabstops
+ ? buf->_tabstops[buf->cur_tabstop]
+ : 0;
+}
+
+static void __prt_tab(struct printbuf *out)
+{
+ int spaces = max_t(int, 0, cur_tabstop(out) - printbuf_linelen(out));
+
+ prt_chars(out, ' ', spaces);
+
+ out->last_field = out->pos;
+ out->cur_tabstop++;
+}
+
+/**
+ * bch2_prt_tab() - Advance printbuf to the next tabstop
+ * @out: printbuf to control
+ *
+ * Advance output to the next tabstop by printing spaces.
+ */
+void bch2_prt_tab(struct printbuf *out)
+{
+ if (WARN_ON(!cur_tabstop(out)))
+ return;
+
+ __prt_tab(out);
+}
+
+static void __prt_tab_rjust(struct printbuf *buf)
+{
+ unsigned move = buf->pos - buf->last_field;
+ int pad = (int) cur_tabstop(buf) - (int) printbuf_linelen(buf);
+
+ if (pad > 0) {
+ bch2_printbuf_make_room(buf, pad);
+
+ if (buf->last_field + pad < buf->size)
+ memmove(buf->buf + buf->last_field + pad,
+ buf->buf + buf->last_field,
+ min(move, buf->size - 1 - buf->last_field - pad));
+
+ if (buf->last_field < buf->size)
+ memset(buf->buf + buf->last_field, ' ',
+ min((unsigned) pad, buf->size - buf->last_field));
+
+ buf->pos += pad;
+ printbuf_nul_terminate(buf);
+ }
+
+ buf->last_field = buf->pos;
+ buf->cur_tabstop++;
+}
+
+/**
+ * bch2_prt_tab_rjust - Advance printbuf to the next tabstop, right justifying
+ * previous output
+ *
+ * @buf: printbuf to control
+ *
+ * Advance output to the next tabstop by inserting spaces immediately after the
+ * previous tabstop, right justifying previously output text.
+ */
+void bch2_prt_tab_rjust(struct printbuf *buf)
+{
+ if (WARN_ON(!cur_tabstop(buf)))
+ return;
+
+ __prt_tab_rjust(buf);
+}
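+
+/*
+ * Illustration (editorial sketch, not part of the original commit): with
+ * tabstops at columns 8 and 16, after
+ *
+ *	prt_str(out, "size:");
+ *	bch2_prt_tab(out);
+ *	prt_str(out, "175");
+ *
+ * the line reads "size:   175"; calling bch2_prt_tab_rjust(out) then inserts
+ * spaces at the previous tabstop, giving "size:        175" with "175" ending
+ * at column 16.
+ */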
+
+/**
+ * bch2_prt_bytes_indented() - Print an array of chars, handling embedded control characters
+ *
+ * @out: output printbuf
+ * @str: string to print
+ * @count: number of bytes to print
+ *
+ * The following control characters are handled as follows:
+ * \n: prt_newline newline that obeys current indent level
+ * \t: prt_tab advance to next tabstop
+ * \r: prt_tab_rjust advance to next tabstop, with right justification
+ */
+void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned count)
+{
+ const char *unprinted_start = str;
+ const char *end = str + count;
+
+ if (!out->has_indent_or_tabstops || out->suppress_indent_tabstop_handling) {
+ prt_bytes(out, str, count);
+ return;
+ }
+
+ while (str != end) {
+ switch (*str) {
+ case '\n':
+ prt_bytes(out, unprinted_start, str - unprinted_start);
+ unprinted_start = str + 1;
+ bch2_prt_newline(out);
+ break;
+ case '\t':
+ if (likely(cur_tabstop(out))) {
+ prt_bytes(out, unprinted_start, str - unprinted_start);
+ unprinted_start = str + 1;
+ __prt_tab(out);
+ }
+ break;
+ case '\r':
+ if (likely(cur_tabstop(out))) {
+ prt_bytes(out, unprinted_start, str - unprinted_start);
+ unprinted_start = str + 1;
+ __prt_tab_rjust(out);
+ }
+ break;
+ }
+
+ str++;
+ }
+
+ prt_bytes(out, unprinted_start, str - unprinted_start);
+}
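+
+/*
+ * Example (editorial sketch): with indenting or tabstops active,
+ * prt_str_indented(out, "label:\tvalue\n") prints "label:", pads to the next
+ * tabstop, prints "value", then emits a newline that re-applies the current
+ * indent - the same as open-coding bch2_prt_tab() and bch2_prt_newline().
+ */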
+
+/**
+ * bch2_prt_human_readable_u64() - Print out a u64 in human readable units
+ * @out: output printbuf
+ * @v: integer to print
+ *
+ * Units of 2^10 (default) or 10^3 are controlled via @out->si_units
+ */
+void bch2_prt_human_readable_u64(struct printbuf *out, u64 v)
+{
+ bch2_printbuf_make_room(out, 10);
+ out->pos += string_get_size(v, 1, !out->si_units,
+ out->buf + out->pos,
+ printbuf_remaining_size(out));
+}
+
+/**
+ * bch2_prt_human_readable_s64() - Print out a s64 in human readable units
+ * @out: output printbuf
+ * @v: integer to print
+ *
+ * Units of 2^10 (default) or 10^3 are controlled via @out->si_units
+ */
+void bch2_prt_human_readable_s64(struct printbuf *out, s64 v)
+{
+ if (v < 0)
+ prt_char(out, '-');
+ bch2_prt_human_readable_u64(out, abs(v));
+}
+
+/**
+ * bch2_prt_units_u64() - Print out a u64 according to printbuf unit options
+ * @out: output printbuf
+ * @v: integer to print
+ *
+ * Units are either raw (default), or human readable units (controlled via
+ * @out->human_readable_units)
+ */
+void bch2_prt_units_u64(struct printbuf *out, u64 v)
+{
+ if (out->human_readable_units)
+ bch2_prt_human_readable_u64(out, v);
+ else
+ bch2_prt_printf(out, "%llu", v);
+}
+
+/**
+ * bch2_prt_units_s64() - Print out a s64 according to printbuf unit options
+ * @out: output printbuf
+ * @v: integer to print
+ *
+ * Units are either raw (default), or human readable units (controlled via
+ * @out->human_readable_units)
+ */
+void bch2_prt_units_s64(struct printbuf *out, s64 v)
+{
+ if (v < 0)
+ prt_char(out, '-');
+ bch2_prt_units_u64(out, abs(v));
+}
+
+void bch2_prt_string_option(struct printbuf *out,
+ const char * const list[],
+ size_t selected)
+{
+ size_t i;
+
+ for (i = 0; list[i]; i++)
+ bch2_prt_printf(out, i == selected ? "[%s] " : "%s ", list[i]);
+}
+
+void bch2_prt_bitflags(struct printbuf *out,
+ const char * const list[], u64 flags)
+{
+ unsigned bit, nr = 0;
+ bool first = true;
+
+ while (list[nr])
+ nr++;
+
+ while (flags && (bit = __ffs(flags)) < nr) {
+ if (!first)
+ bch2_prt_printf(out, ",");
+ first = false;
+ bch2_prt_printf(out, "%s", list[bit]);
+ flags ^= 1ULL << bit;
+ }
+}
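+
+/*
+ * Example (editorial sketch): given
+ *	static const char * const states[] = { "ro", "rw", "degraded", NULL };
+ * bch2_prt_bitflags(out, states, 0x5) emits "ro,degraded" (bits 0 and 2 set).
+ */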
diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h
new file mode 100644
index 000000000000..2191423d9f22
--- /dev/null
+++ b/fs/bcachefs/printbuf.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022 Kent Overstreet */
+
+#ifndef _BCACHEFS_PRINTBUF_H
+#define _BCACHEFS_PRINTBUF_H
+
+/*
+ * Printbufs: Simple strings for printing to, with optional heap allocation
+ *
+ * This code has provisions for use in userspace, to aid in making other code
+ * portable between kernelspace and userspace.
+ *
+ * Basic example:
+ * struct printbuf buf = PRINTBUF;
+ *
+ * prt_printf(&buf, "foo=");
+ * foo_to_text(&buf, foo);
+ * printk("%s", buf.buf);
+ * printbuf_exit(&buf);
+ *
+ * Or
+ * struct printbuf buf = PRINTBUF_EXTERN(char_buf, char_buf_size)
+ *
+ * We can now write pretty printers instead of writing code that dumps
+ * everything to the kernel log buffer, and then those pretty-printers can be
+ * used by other code that outputs to kernel log, sysfs, debugfs, etc.
+ *
+ * Memory allocation: Outputting to a printbuf may allocate memory. This
+ * allocation is done with GFP_KERNEL, by default: use the newer
+ * memalloc_*_(save|restore) functions as needed.
+ *
+ * Since no equivalent yet exists for GFP_ATOMIC/GFP_NOWAIT, memory allocations
+ * will be done with GFP_NOWAIT if printbuf->atomic is nonzero.
+ *
+ * It's allowed to grab the output buffer and free it later with kfree() instead
+ * of using printbuf_exit(), if the user just needs a heap allocated string at
+ * the end.
+ *
+ * Memory allocation failures: We don't return errors directly, because on
+ * memory allocation failure we usually don't want to bail out and unwind - we
+ * want to print what we've got, on a best-effort basis. But code that does want
+ * to return -ENOMEM may check printbuf.allocation_failure.
+ *
+ * Indenting, tabstops:
+ *
+ * To aid in writing multi-line pretty printers spread across multiple
+ * functions, printbufs track the current indent level.
+ *
+ * printbuf_indent_add() and printbuf_indent_sub() increase and decrease the
+ * current indent level, respectively.
+ *
+ * To use tabstops, set printbuf->tabstops[]; they are in units of spaces, from
+ * start of line. Once set, prt_tab() will output spaces up to the next tabstop.
+ * prt_tab_rjust() will also advance the current line of text up to the next
+ * tabstop, but it does so by shifting text since the previous tabstop up to the
+ * next tabstop - right justifying it.
+ *
+ * Make sure you use prt_newline() instead of \n in the format string for indent
+ * level and tabstops to work correctly.
+ *
+ * Output units: printbuf->si_units and printbuf->human_readable_units tell
+ * pretty-printers how to output numbers: a raw value (e.g. directly from a
+ * superblock field), as bytes, or as human readable bytes. prt_units_u64()
+ * and prt_units_s64() obey them.
+ */
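+
+/*
+ * Tabstop example (editorial sketch, not part of the original commit; uses
+ * only the API declared in this header):
+ *
+ *	struct printbuf buf = PRINTBUF;
+ *
+ *	bch2_printbuf_tabstop_push(&buf, 24);
+ *	prt_str(&buf, "compression:");
+ *	bch2_prt_tab(&buf);
+ *	prt_str(&buf, "lz4");
+ *	bch2_prt_newline(&buf);
+ *
+ *	printk("%s", buf.buf);
+ *	bch2_printbuf_exit(&buf);
+ *
+ * bch2_prt_tab() pads with spaces up to column 24, so this prints
+ * "compression:            lz4" followed by a newline.
+ */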
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+enum printbuf_si {
+ PRINTBUF_UNITS_2, /* use binary powers of 2^10 */
+ PRINTBUF_UNITS_10, /* use powers of 10^3 (standard SI) */
+};
+
+#define PRINTBUF_INLINE_TABSTOPS 6
+
+struct printbuf {
+ char *buf;
+ unsigned size;
+ unsigned pos;
+ unsigned last_newline;
+ unsigned last_field;
+ unsigned indent;
+ /*
+ * If nonzero, allocations will be done with GFP_NOWAIT:
+ */
+ u8 atomic;
+ bool allocation_failure:1;
+ bool heap_allocated:1;
+ enum printbuf_si si_units:1;
+ bool human_readable_units:1;
+ bool has_indent_or_tabstops:1;
+ bool suppress_indent_tabstop_handling:1;
+ u8 nr_tabstops;
+
+ /*
+ * Do not modify directly: use bch2_printbuf_tabstop_push(),
+ * bch2_printbuf_tabstop_pop()
+ */
+ u8 cur_tabstop;
+ u8 _tabstops[PRINTBUF_INLINE_TABSTOPS];
+};
+
+int bch2_printbuf_make_room(struct printbuf *, unsigned);
+__printf(2, 3) void bch2_prt_printf(struct printbuf *out, const char *fmt, ...);
+__printf(2, 0) void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list);
+const char *bch2_printbuf_str(const struct printbuf *);
+void bch2_printbuf_exit(struct printbuf *);
+
+void bch2_printbuf_tabstops_reset(struct printbuf *);
+void bch2_printbuf_tabstop_pop(struct printbuf *);
+int bch2_printbuf_tabstop_push(struct printbuf *, unsigned);
+
+void bch2_printbuf_indent_add(struct printbuf *, unsigned);
+void bch2_printbuf_indent_sub(struct printbuf *, unsigned);
+
+void bch2_prt_newline(struct printbuf *);
+void bch2_prt_tab(struct printbuf *);
+void bch2_prt_tab_rjust(struct printbuf *);
+
+void bch2_prt_bytes_indented(struct printbuf *, const char *, unsigned);
+void bch2_prt_human_readable_u64(struct printbuf *, u64);
+void bch2_prt_human_readable_s64(struct printbuf *, s64);
+void bch2_prt_units_u64(struct printbuf *, u64);
+void bch2_prt_units_s64(struct printbuf *, s64);
+void bch2_prt_string_option(struct printbuf *, const char * const[], size_t);
+void bch2_prt_bitflags(struct printbuf *, const char * const[], u64);
+
+/* Initializer for a heap allocated printbuf: */
+#define PRINTBUF ((struct printbuf) { .heap_allocated = true })
+
+/* Initializer for a printbuf that points to an external buffer: */
+#define PRINTBUF_EXTERN(_buf, _size) \
+((struct printbuf) { \
+ .buf = _buf, \
+ .size = _size, \
+})
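+
+/*
+ * Example (editorial sketch): PRINTBUF_EXTERN wraps a caller-provided buffer;
+ * since heap_allocated is false, bch2_printbuf_make_room() is a no-op and
+ * output is truncated rather than reallocated:
+ *
+ *	char stack_buf[128];
+ *	struct printbuf out = PRINTBUF_EXTERN(stack_buf, sizeof(stack_buf));
+ *
+ *	bch2_prt_printf(&out, "%llu sectors", sectors);
+ */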
+
+/*
+ * Returns size remaining of output buffer:
+ */
+static inline unsigned printbuf_remaining_size(struct printbuf *out)
+{
+ return out->pos < out->size ? out->size - out->pos : 0;
+}
+
+/*
+ * Returns number of characters we can print to the output buffer - i.e.
+ * excluding the terminating nul:
+ */
+static inline unsigned printbuf_remaining(struct printbuf *out)
+{
+ return out->pos < out->size ? out->size - out->pos - 1 : 0;
+}
+
+static inline unsigned printbuf_written(struct printbuf *out)
+{
+ return out->size ? min(out->pos, out->size - 1) : 0;
+}
+
+/*
+ * Returns true if output was truncated:
+ */
+static inline bool printbuf_overflowed(struct printbuf *out)
+{
+ return out->pos >= out->size;
+}
+
+static inline void printbuf_nul_terminate(struct printbuf *out)
+{
+ bch2_printbuf_make_room(out, 1);
+
+ if (out->pos < out->size)
+ out->buf[out->pos] = 0;
+ else if (out->size)
+ out->buf[out->size - 1] = 0;
+}
+
+/* Doesn't call bch2_printbuf_make_room(), doesn't nul terminate: */
+static inline void __prt_char_reserved(struct printbuf *out, char c)
+{
+ if (printbuf_remaining(out))
+ out->buf[out->pos] = c;
+ out->pos++;
+}
+
+/* Doesn't nul terminate: */
+static inline void __prt_char(struct printbuf *out, char c)
+{
+ bch2_printbuf_make_room(out, 1);
+ __prt_char_reserved(out, c);
+}
+
+static inline void prt_char(struct printbuf *out, char c)
+{
+ __prt_char(out, c);
+ printbuf_nul_terminate(out);
+}
+
+static inline void __prt_chars_reserved(struct printbuf *out, char c, unsigned n)
+{
+ unsigned i, can_print = min(n, printbuf_remaining(out));
+
+ for (i = 0; i < can_print; i++)
+ out->buf[out->pos++] = c;
+ out->pos += n - can_print;
+}
+
+static inline void prt_chars(struct printbuf *out, char c, unsigned n)
+{
+ bch2_printbuf_make_room(out, n);
+ __prt_chars_reserved(out, c, n);
+ printbuf_nul_terminate(out);
+}
+
+static inline void prt_bytes(struct printbuf *out, const void *b, unsigned n)
+{
+ unsigned i, can_print;
+
+ bch2_printbuf_make_room(out, n);
+
+ can_print = min(n, printbuf_remaining(out));
+
+ for (i = 0; i < can_print; i++)
+ out->buf[out->pos++] = ((char *) b)[i];
+ out->pos += n - can_print;
+
+ printbuf_nul_terminate(out);
+}
+
+static inline void prt_str(struct printbuf *out, const char *str)
+{
+ prt_bytes(out, str, strlen(str));
+}
+
+static inline void prt_str_indented(struct printbuf *out, const char *str)
+{
+ bch2_prt_bytes_indented(out, str, strlen(str));
+}
+
+static inline void prt_hex_byte(struct printbuf *out, u8 byte)
+{
+ bch2_printbuf_make_room(out, 2);
+ __prt_char_reserved(out, hex_asc_hi(byte));
+ __prt_char_reserved(out, hex_asc_lo(byte));
+ printbuf_nul_terminate(out);
+}
+
+static inline void prt_hex_byte_upper(struct printbuf *out, u8 byte)
+{
+ bch2_printbuf_make_room(out, 2);
+ __prt_char_reserved(out, hex_asc_upper_hi(byte));
+ __prt_char_reserved(out, hex_asc_upper_lo(byte));
+ printbuf_nul_terminate(out);
+}
+
+/**
+ * printbuf_reset - reuse a printbuf without freeing and reinitializing it
+ * @buf: printbuf to reset
+ */
+static inline void printbuf_reset(struct printbuf *buf)
+{
+ buf->pos = 0;
+ buf->allocation_failure = 0;
+ buf->indent = 0;
+ buf->nr_tabstops = 0;
+ buf->cur_tabstop = 0;
+}
+
+/**
+ * printbuf_atomic_inc - mark as entering an atomic section
+ */
+static inline void printbuf_atomic_inc(struct printbuf *buf)
+{
+ buf->atomic++;
+}
+
+/**
+ * printbuf_atomic_dec - mark as leaving an atomic section
+ */
+static inline void printbuf_atomic_dec(struct printbuf *buf)
+{
+ buf->atomic--;
+}
+
+#endif /* _BCACHEFS_PRINTBUF_H */
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
new file mode 100644
index 000000000000..cb68ae44d597
--- /dev/null
+++ b/fs/bcachefs/quota.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "btree_update.h"
+#include "errcode.h"
+#include "error.h"
+#include "inode.h"
+#include "quota.h"
+#include "snapshot.h"
+#include "super-io.h"
+
+static const char * const bch2_quota_types[] = {
+ "user",
+ "group",
+ "project",
+};
+
+static const char * const bch2_quota_counters[] = {
+ "space",
+ "inodes",
+};
+
+static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_quota *q = field_to_type(f, quota);
+
+ if (vstruct_bytes(&q->field) < sizeof(*q)) {
+ prt_printf(err, "wrong size (got %zu should be %zu)",
+ vstruct_bytes(&q->field), sizeof(*q));
+ return -BCH_ERR_invalid_sb_quota;
+ }
+
+ return 0;
+}
+
+static void bch2_sb_quota_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_quota *q = field_to_type(f, quota);
+ unsigned qtyp, counter;
+
+ for (qtyp = 0; qtyp < ARRAY_SIZE(q->q); qtyp++) {
+ prt_printf(out, "%s: flags %llx",
+ bch2_quota_types[qtyp],
+ le64_to_cpu(q->q[qtyp].flags));
+
+ for (counter = 0; counter < Q_COUNTERS; counter++)
+ prt_printf(out, " %s timelimit %u warnlimit %u",
+ bch2_quota_counters[counter],
+ le32_to_cpu(q->q[qtyp].c[counter].timelimit),
+ le32_to_cpu(q->q[qtyp].c[counter].warnlimit));
+
+ prt_newline(out);
+ }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_quota = {
+ .validate = bch2_sb_quota_validate,
+ .to_text = bch2_sb_quota_to_text,
+};
+
+int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (k.k->p.inode >= QTYP_NR) {
+ prt_printf(err, "invalid quota type (%llu >= %u)",
+ k.k->p.inode, QTYP_NR);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_quota dq = bkey_s_c_to_quota(k);
+ unsigned i;
+
+ for (i = 0; i < Q_COUNTERS; i++)
+ prt_printf(out, "%s hardlimit %llu softlimit %llu",
+ bch2_quota_counters[i],
+ le64_to_cpu(dq.v->c[i].hardlimit),
+ le64_to_cpu(dq.v->c[i].softlimit));
+}
+
+#ifdef CONFIG_BCACHEFS_QUOTA
+
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/quota.h>
+
+static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
+{
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 20);
+
+ prt_str(out, "i_fieldmask");
+ prt_tab(out);
+ prt_printf(out, "%x", i->i_fieldmask);
+ prt_newline(out);
+
+ prt_str(out, "i_flags");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_flags);
+ prt_newline(out);
+
+ prt_str(out, "i_spc_timelimit");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_spc_timelimit);
+ prt_newline(out);
+
+ prt_str(out, "i_ino_timelimit");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_ino_timelimit);
+ prt_newline(out);
+
+ prt_str(out, "i_rt_spc_timelimit");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_rt_spc_timelimit);
+ prt_newline(out);
+
+ prt_str(out, "i_spc_warnlimit");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_spc_warnlimit);
+ prt_newline(out);
+
+ prt_str(out, "i_ino_warnlimit");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_ino_warnlimit);
+ prt_newline(out);
+
+ prt_str(out, "i_rt_spc_warnlimit");
+ prt_tab(out);
+ prt_printf(out, "%u", i->i_rt_spc_warnlimit);
+ prt_newline(out);
+}
+
+static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
+{
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 20);
+
+ prt_str(out, "d_fieldmask");
+ prt_tab(out);
+ prt_printf(out, "%x", q->d_fieldmask);
+ prt_newline(out);
+
+ prt_str(out, "d_spc_hardlimit");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_spc_hardlimit);
+ prt_newline(out);
+
+ prt_str(out, "d_spc_softlimit");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_spc_softlimit);
+ prt_newline(out);
+
+ prt_str(out, "d_ino_hardlimit");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_ino_hardlimit);
+ prt_newline(out);
+
+ prt_str(out, "d_ino_softlimit");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_ino_softlimit);
+ prt_newline(out);
+
+ prt_str(out, "d_space");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_space);
+ prt_newline(out);
+
+ prt_str(out, "d_ino_count");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_ino_count);
+ prt_newline(out);
+
+ prt_str(out, "d_ino_timer");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_ino_timer);
+ prt_newline(out);
+
+ prt_str(out, "d_spc_timer");
+ prt_tab(out);
+ prt_printf(out, "%llu", q->d_spc_timer);
+ prt_newline(out);
+
+ prt_str(out, "d_ino_warns");
+ prt_tab(out);
+ prt_printf(out, "%i", q->d_ino_warns);
+ prt_newline(out);
+
+ prt_str(out, "d_spc_warns");
+ prt_tab(out);
+ prt_printf(out, "%i", q->d_spc_warns);
+ prt_newline(out);
+}
+
+static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
+{
+ qtypes >>= i;
+ return qtypes ? i + __ffs(qtypes) : QTYP_NR;
+}
+
+#define for_each_set_qtype(_c, _i, _q, _qtypes) \
+ for (_i = 0; \
+ (_i = __next_qtype(_i, _qtypes), \
+ _q = &(_c)->quotas[_i], \
+ _i < QTYP_NR); \
+ _i++)
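+
+/*
+ * Example (editorial sketch): with _qtypes = (1 << QTYP_USR)|(1 << QTYP_PRJ),
+ * the loop visits _i = QTYP_USR, then _i = QTYP_PRJ, skipping QTYP_GRP; _q
+ * points at the matching c->quotas[] entry on each iteration.
+ */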
+
+static bool ignore_hardlimit(struct bch_memquota_type *q)
+{
+ if (capable(CAP_SYS_RESOURCE))
+ return true;
+#if 0
+ struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
+
+ return capable(CAP_SYS_RESOURCE) &&
+ (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
+ !(info->dqi_flags & DQF_ROOT_SQUASH));
+#endif
+ return false;
+}
+
+enum quota_msg {
+ SOFTWARN, /* Softlimit reached */
+ SOFTLONGWARN, /* Grace time expired */
+ HARDWARN, /* Hardlimit reached */
+
+ HARDBELOW, /* Usage got below inode hardlimit */
+ SOFTBELOW, /* Usage got below inode softlimit */
+};
+
+static int quota_nl[][Q_COUNTERS] = {
+ [HARDWARN][Q_SPC] = QUOTA_NL_BHARDWARN,
+ [SOFTLONGWARN][Q_SPC] = QUOTA_NL_BSOFTLONGWARN,
+ [SOFTWARN][Q_SPC] = QUOTA_NL_BSOFTWARN,
+ [HARDBELOW][Q_SPC] = QUOTA_NL_BHARDBELOW,
+ [SOFTBELOW][Q_SPC] = QUOTA_NL_BSOFTBELOW,
+
+ [HARDWARN][Q_INO] = QUOTA_NL_IHARDWARN,
+ [SOFTLONGWARN][Q_INO] = QUOTA_NL_ISOFTLONGWARN,
+ [SOFTWARN][Q_INO] = QUOTA_NL_ISOFTWARN,
+ [HARDBELOW][Q_INO] = QUOTA_NL_IHARDBELOW,
+ [SOFTBELOW][Q_INO] = QUOTA_NL_ISOFTBELOW,
+};
+
+struct quota_msgs {
+ u8 nr;
+ struct {
+ u8 qtype;
+ u8 msg;
+ } m[QTYP_NR * Q_COUNTERS];
+};
+
+static void prepare_msg(unsigned qtype,
+ enum quota_counters counter,
+ struct quota_msgs *msgs,
+ enum quota_msg msg_type)
+{
+ BUG_ON(msgs->nr >= ARRAY_SIZE(msgs->m));
+
+ msgs->m[msgs->nr].qtype = qtype;
+ msgs->m[msgs->nr].msg = quota_nl[msg_type][counter];
+ msgs->nr++;
+}
+
+static void prepare_warning(struct memquota_counter *qc,
+ unsigned qtype,
+ enum quota_counters counter,
+ struct quota_msgs *msgs,
+ enum quota_msg msg_type)
+{
+ if (qc->warning_issued & (1 << msg_type))
+ return;
+
+ prepare_msg(qtype, counter, msgs, msg_type);
+}
+
+static void flush_warnings(struct bch_qid qid,
+ struct super_block *sb,
+ struct quota_msgs *msgs)
+{
+ unsigned i;
+
+ for (i = 0; i < msgs->nr; i++)
+ quota_send_warning(make_kqid(&init_user_ns, msgs->m[i].qtype, qid.q[i]),
+ sb->s_dev, msgs->m[i].msg);
+}
+
+static int bch2_quota_check_limit(struct bch_fs *c,
+ unsigned qtype,
+ struct bch_memquota *mq,
+ struct quota_msgs *msgs,
+ enum quota_counters counter,
+ s64 v,
+ enum quota_acct_mode mode)
+{
+ struct bch_memquota_type *q = &c->quotas[qtype];
+ struct memquota_counter *qc = &mq->c[counter];
+ u64 n = qc->v + v;
+
+ BUG_ON((s64) n < 0);
+
+ if (mode == KEY_TYPE_QUOTA_NOCHECK)
+ return 0;
+
+ if (v <= 0) {
+ if (n < qc->hardlimit &&
+ (qc->warning_issued & (1 << HARDWARN))) {
+ qc->warning_issued &= ~(1 << HARDWARN);
+ prepare_msg(qtype, counter, msgs, HARDBELOW);
+ }
+
+ if (n < qc->softlimit &&
+ (qc->warning_issued & (1 << SOFTWARN))) {
+ qc->warning_issued &= ~(1 << SOFTWARN);
+ prepare_msg(qtype, counter, msgs, SOFTBELOW);
+ }
+
+ qc->warning_issued = 0;
+ return 0;
+ }
+
+ if (qc->hardlimit &&
+ qc->hardlimit < n &&
+ !ignore_hardlimit(q)) {
+ prepare_warning(qc, qtype, counter, msgs, HARDWARN);
+ return -EDQUOT;
+ }
+
+ if (qc->softlimit &&
+ qc->softlimit < n) {
+ if (qc->timer == 0) {
+ qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
+ prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
+ } else if (ktime_get_real_seconds() >= qc->timer &&
+ !ignore_hardlimit(q)) {
+ prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
+ return -EDQUOT;
+ }
+ }
+
+ return 0;
+}
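+
+/*
+ * Example (editorial sketch): an allocation that takes usage past the
+ * softlimit starts the grace timer and queues a SOFTWARN message; once
+ * ktime_get_real_seconds() passes the timer, further allocations fail with
+ * -EDQUOT, just as when the hardlimit is exceeded.
+ */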
+
+int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
+ enum quota_counters counter, s64 v,
+ enum quota_acct_mode mode)
+{
+ unsigned qtypes = enabled_qtypes(c);
+ struct bch_memquota_type *q;
+ struct bch_memquota *mq[QTYP_NR];
+ struct quota_msgs msgs;
+ unsigned i;
+ int ret = 0;
+
+ memset(&msgs, 0, sizeof(msgs));
+
+ for_each_set_qtype(c, i, q, qtypes) {
+ mq[i] = genradix_ptr_alloc(&q->table, qid.q[i], GFP_KERNEL);
+ if (!mq[i])
+ return -ENOMEM;
+ }
+
+ for_each_set_qtype(c, i, q, qtypes)
+ mutex_lock_nested(&q->lock, i);
+
+ for_each_set_qtype(c, i, q, qtypes) {
+ ret = bch2_quota_check_limit(c, i, mq[i], &msgs, counter, v, mode);
+ if (ret)
+ goto err;
+ }
+
+ for_each_set_qtype(c, i, q, qtypes)
+ mq[i]->c[counter].v += v;
+err:
+ for_each_set_qtype(c, i, q, qtypes)
+ mutex_unlock(&q->lock);
+
+ flush_warnings(qid, c->vfs_sb, &msgs);
+
+ return ret;
+}
+
+static void __bch2_quota_transfer(struct bch_memquota *src_q,
+ struct bch_memquota *dst_q,
+ enum quota_counters counter, s64 v)
+{
+ BUG_ON(v > src_q->c[counter].v);
+ BUG_ON(v + dst_q->c[counter].v < v);
+
+ src_q->c[counter].v -= v;
+ dst_q->c[counter].v += v;
+}
+
+int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
+ struct bch_qid dst,
+ struct bch_qid src, u64 space,
+ enum quota_acct_mode mode)
+{
+ struct bch_memquota_type *q;
+ struct bch_memquota *src_q[QTYP_NR], *dst_q[QTYP_NR];
+ struct quota_msgs msgs;
+ unsigned i;
+ int ret = 0;
+
+ qtypes &= enabled_qtypes(c);
+
+ memset(&msgs, 0, sizeof(msgs));
+
+ for_each_set_qtype(c, i, q, qtypes) {
+ src_q[i] = genradix_ptr_alloc(&q->table, src.q[i], GFP_KERNEL);
+ dst_q[i] = genradix_ptr_alloc(&q->table, dst.q[i], GFP_KERNEL);
+ if (!src_q[i] || !dst_q[i])
+ return -ENOMEM;
+ }
+
+ for_each_set_qtype(c, i, q, qtypes)
+ mutex_lock_nested(&q->lock, i);
+
+ for_each_set_qtype(c, i, q, qtypes) {
+ ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_SPC,
+ dst_q[i]->c[Q_SPC].v + space,
+ mode);
+ if (ret)
+ goto err;
+
+ ret = bch2_quota_check_limit(c, i, dst_q[i], &msgs, Q_INO,
+ dst_q[i]->c[Q_INO].v + 1,
+ mode);
+ if (ret)
+ goto err;
+ }
+
+ for_each_set_qtype(c, i, q, qtypes) {
+ __bch2_quota_transfer(src_q[i], dst_q[i], Q_SPC, space);
+ __bch2_quota_transfer(src_q[i], dst_q[i], Q_INO, 1);
+ }
+
+err:
+ for_each_set_qtype(c, i, q, qtypes)
+ mutex_unlock(&q->lock);
+
+ flush_warnings(dst, c->vfs_sb, &msgs);
+
+ return ret;
+}
+
+static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
+ struct qc_dqblk *qdq)
+{
+ struct bkey_s_c_quota dq;
+ struct bch_memquota_type *q;
+ struct bch_memquota *mq;
+ unsigned i;
+
+ BUG_ON(k.k->p.inode >= QTYP_NR);
+
+ if (!((1U << k.k->p.inode) & enabled_qtypes(c)))
+ return 0;
+
+ switch (k.k->type) {
+ case KEY_TYPE_quota:
+ dq = bkey_s_c_to_quota(k);
+ q = &c->quotas[k.k->p.inode];
+
+ mutex_lock(&q->lock);
+ mq = genradix_ptr_alloc(&q->table, k.k->p.offset, GFP_KERNEL);
+ if (!mq) {
+ mutex_unlock(&q->lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < Q_COUNTERS; i++) {
+ mq->c[i].hardlimit = le64_to_cpu(dq.v->c[i].hardlimit);
+ mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
+ }
+
+ if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
+ mq->c[Q_SPC].timer = qdq->d_spc_timer;
+ if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
+ mq->c[Q_SPC].warns = qdq->d_spc_warns;
+ if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
+ mq->c[Q_INO].timer = qdq->d_ino_timer;
+ if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
+ mq->c[Q_INO].warns = qdq->d_ino_warns;
+
+ mutex_unlock(&q->lock);
+ }
+
+ return 0;
+}
+
+void bch2_fs_quota_exit(struct bch_fs *c)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
+ genradix_free(&c->quotas[i].table);
+}
+
+void bch2_fs_quota_init(struct bch_fs *c)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(c->quotas); i++)
+ mutex_init(&c->quotas[i].lock);
+}
+
+static struct bch_sb_field_quota *bch2_sb_get_or_create_quota(struct bch_sb_handle *sb)
+{
+ struct bch_sb_field_quota *sb_quota = bch2_sb_field_get(sb->sb, quota);
+
+ if (sb_quota)
+ return sb_quota;
+
+ sb_quota = bch2_sb_field_resize(sb, quota, sizeof(*sb_quota) / sizeof(u64));
+ if (sb_quota) {
+ unsigned qtype, qc;
+
+ for (qtype = 0; qtype < QTYP_NR; qtype++)
+ for (qc = 0; qc < Q_COUNTERS; qc++)
+ sb_quota->q[qtype].c[qc].timelimit =
+ cpu_to_le32(7 * 24 * 60 * 60); /* 7 day default grace period */
+ }
+
+ return sb_quota;
+}
+
+static void bch2_sb_quota_read(struct bch_fs *c)
+{
+ struct bch_sb_field_quota *sb_quota;
+ unsigned i, j;
+
+ sb_quota = bch2_sb_field_get(c->disk_sb.sb, quota);
+ if (!sb_quota)
+ return;
+
+ for (i = 0; i < QTYP_NR; i++) {
+ struct bch_memquota_type *q = &c->quotas[i];
+
+ for (j = 0; j < Q_COUNTERS; j++) {
+ q->limits[j].timelimit =
+ le32_to_cpu(sb_quota->q[i].c[j].timelimit);
+ q->limits[j].warnlimit =
+ le32_to_cpu(sb_quota->q[i].c[j].warnlimit);
+ }
+ }
+}
+
+static int bch2_fs_quota_read_inode(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked u;
+ struct bch_snapshot_tree s_t;
+ int ret;
+
+ ret = bch2_snapshot_tree_lookup(trans,
+ bch2_snapshot_tree(c, k.k->p.snapshot), &s_t);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "%s: snapshot tree %u not found", __func__,
+ snapshot_t(c, k.k->p.snapshot)->tree);
+ if (ret)
+ return ret;
+
+ if (!s_t.master_subvol)
+ goto advance;
+
+ ret = bch2_inode_find_by_inum_nowarn_trans(trans,
+ (subvol_inum) {
+ le32_to_cpu(s_t.master_subvol),
+ k.k->p.offset,
+ }, &u);
+ /*
+ * Inode might be deleted in this snapshot - the easiest way to handle
+ * that is to just skip it here:
+ */
+ if (bch2_err_matches(ret, ENOENT))
+ goto advance;
+
+ if (ret)
+ return ret;
+
+ bch2_quota_acct(c, bch_qid(&u), Q_SPC, u.bi_sectors,
+ KEY_TYPE_QUOTA_NOCHECK);
+ bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
+ KEY_TYPE_QUOTA_NOCHECK);
+advance:
+ bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ return 0;
+}
+
+int bch2_fs_quota_read(struct bch_fs *c)
+{
+ struct bch_sb_field_quota *sb_quota;
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ mutex_lock(&c->sb_lock);
+ sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
+ if (!sb_quota) {
+ mutex_unlock(&c->sb_lock);
+ return -BCH_ERR_ENOSPC_sb_quota;
+ }
+
+ bch2_sb_quota_read(c);
+ mutex_unlock(&c->sb_lock);
+
+ trans = bch2_trans_get(c);
+
+ ret = for_each_btree_key2(trans, iter, BTREE_ID_quotas,
+ POS_MIN, BTREE_ITER_PREFETCH, k,
+ __bch2_quota_set(c, k, NULL)) ?:
+ for_each_btree_key2(trans, iter, BTREE_ID_inodes,
+ POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ bch2_fs_quota_read_inode(trans, &iter, k));
+
+ bch2_trans_put(trans);
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/* Enable/disable/delete quotas for an entire filesystem: */
+
+static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct bch_sb_field_quota *sb_quota;
+ int ret = 0;
+
+ if (sb->s_flags & SB_RDONLY)
+ return -EROFS;
+
+ /* Accounting must be enabled at mount time: */
+ if (uflags & (FS_QUOTA_UDQ_ACCT|FS_QUOTA_GDQ_ACCT|FS_QUOTA_PDQ_ACCT))
+ return -EINVAL;
+
+ /* Can't enable enforcement without accounting: */
+ if ((uflags & FS_QUOTA_UDQ_ENFD) && !c->opts.usrquota)
+ return -EINVAL;
+
+ if ((uflags & FS_QUOTA_GDQ_ENFD) && !c->opts.grpquota)
+ return -EINVAL;
+
+ if ((uflags & FS_QUOTA_PDQ_ENFD) && !c->opts.prjquota)
+ return -EINVAL;
+
+ mutex_lock(&c->sb_lock);
+ sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
+ if (!sb_quota) {
+ ret = -BCH_ERR_ENOSPC_sb_quota;
+ goto unlock;
+ }
+
+ if (uflags & FS_QUOTA_UDQ_ENFD)
+ SET_BCH_SB_USRQUOTA(c->disk_sb.sb, true);
+
+ if (uflags & FS_QUOTA_GDQ_ENFD)
+ SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, true);
+
+ if (uflags & FS_QUOTA_PDQ_ENFD)
+ SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, true);
+
+ bch2_write_super(c);
+unlock:
+ mutex_unlock(&c->sb_lock);
+
+ return bch2_err_class(ret);
+}
+
+static int bch2_quota_disable(struct super_block *sb, unsigned uflags)
+{
+ struct bch_fs *c = sb->s_fs_info;
+
+ if (sb->s_flags & SB_RDONLY)
+ return -EROFS;
+
+ mutex_lock(&c->sb_lock);
+ if (uflags & FS_QUOTA_UDQ_ENFD)
+ SET_BCH_SB_USRQUOTA(c->disk_sb.sb, false);
+
+ if (uflags & FS_QUOTA_GDQ_ENFD)
+ SET_BCH_SB_GRPQUOTA(c->disk_sb.sb, false);
+
+ if (uflags & FS_QUOTA_PDQ_ENFD)
+ SET_BCH_SB_PRJQUOTA(c->disk_sb.sb, false);
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ return 0;
+}
+
+static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ int ret;
+
+ if (sb->s_flags & SB_RDONLY)
+ return -EROFS;
+
+ if (uflags & FS_USER_QUOTA) {
+ if (c->opts.usrquota)
+ return -EINVAL;
+
+ ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
+ POS(QTYP_USR, 0),
+ POS(QTYP_USR, U64_MAX),
+ 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (uflags & FS_GROUP_QUOTA) {
+ if (c->opts.grpquota)
+ return -EINVAL;
+
+ ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
+ POS(QTYP_GRP, 0),
+ POS(QTYP_GRP, U64_MAX),
+ 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (uflags & FS_PROJ_QUOTA) {
+ if (c->opts.prjquota)
+ return -EINVAL;
+
+ ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
+ POS(QTYP_PRJ, 0),
+ POS(QTYP_PRJ, U64_MAX),
+ 0, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Return quota status information, such as enforcement status, quota file
+ * inode numbers, etc.
+ */
+static int bch2_quota_get_state(struct super_block *sb, struct qc_state *state)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ unsigned qtypes = enabled_qtypes(c);
+ unsigned i;
+
+ memset(state, 0, sizeof(*state));
+
+ for (i = 0; i < QTYP_NR; i++) {
+ state->s_state[i].flags |= QCI_SYSFILE;
+
+ if (!(qtypes & (1 << i)))
+ continue;
+
+ state->s_state[i].flags |= QCI_ACCT_ENABLED;
+
+ state->s_state[i].spc_timelimit = c->quotas[i].limits[Q_SPC].timelimit;
+ state->s_state[i].spc_warnlimit = c->quotas[i].limits[Q_SPC].warnlimit;
+
+ state->s_state[i].ino_timelimit = c->quotas[i].limits[Q_INO].timelimit;
+ state->s_state[i].ino_warnlimit = c->quotas[i].limits[Q_INO].warnlimit;
+ }
+
+ return 0;
+}
+
+/*
+ * Adjust quota timers & warnings
+ */
+static int bch2_quota_set_info(struct super_block *sb, int type,
+ struct qc_info *info)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct bch_sb_field_quota *sb_quota;
+ int ret = 0;
+
+ if (0) {
+ struct printbuf buf = PRINTBUF;
+
+ qc_info_to_text(&buf, info);
+ pr_info("setting:\n%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ if (sb->s_flags & SB_RDONLY)
+ return -EROFS;
+
+ if (type >= QTYP_NR)
+ return -EINVAL;
+
+ if (!((1 << type) & enabled_qtypes(c)))
+ return -ESRCH;
+
+ if (info->i_fieldmask &
+ ~(QC_SPC_TIMER|QC_INO_TIMER|QC_SPC_WARNS|QC_INO_WARNS))
+ return -EINVAL;
+
+ mutex_lock(&c->sb_lock);
+ sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
+ if (!sb_quota) {
+ ret = -BCH_ERR_ENOSPC_sb_quota;
+ goto unlock;
+ }
+
+ if (info->i_fieldmask & QC_SPC_TIMER)
+ sb_quota->q[type].c[Q_SPC].timelimit =
+ cpu_to_le32(info->i_spc_timelimit);
+
+ if (info->i_fieldmask & QC_SPC_WARNS)
+ sb_quota->q[type].c[Q_SPC].warnlimit =
+ cpu_to_le32(info->i_spc_warnlimit);
+
+ if (info->i_fieldmask & QC_INO_TIMER)
+ sb_quota->q[type].c[Q_INO].timelimit =
+ cpu_to_le32(info->i_ino_timelimit);
+
+ if (info->i_fieldmask & QC_INO_WARNS)
+ sb_quota->q[type].c[Q_INO].warnlimit =
+ cpu_to_le32(info->i_ino_warnlimit);
+
+ bch2_sb_quota_read(c);
+
+ bch2_write_super(c);
+unlock:
+ mutex_unlock(&c->sb_lock);
+
+ return bch2_err_class(ret);
+}
+
+/* Get/set individual quotas: */
+
+static void __bch2_quota_get(struct qc_dqblk *dst, struct bch_memquota *src)
+{
+ dst->d_space = src->c[Q_SPC].v << 9;
+ dst->d_spc_hardlimit = src->c[Q_SPC].hardlimit << 9;
+ dst->d_spc_softlimit = src->c[Q_SPC].softlimit << 9;
+ dst->d_spc_timer = src->c[Q_SPC].timer;
+ dst->d_spc_warns = src->c[Q_SPC].warns;
+
+ dst->d_ino_count = src->c[Q_INO].v;
+ dst->d_ino_hardlimit = src->c[Q_INO].hardlimit;
+ dst->d_ino_softlimit = src->c[Q_INO].softlimit;
+ dst->d_ino_timer = src->c[Q_INO].timer;
+ dst->d_ino_warns = src->c[Q_INO].warns;
+}
+
+static int bch2_get_quota(struct super_block *sb, struct kqid kqid,
+ struct qc_dqblk *qdq)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct bch_memquota_type *q = &c->quotas[kqid.type];
+ qid_t qid = from_kqid(&init_user_ns, kqid);
+ struct bch_memquota *mq;
+
+ memset(qdq, 0, sizeof(*qdq));
+
+ mutex_lock(&q->lock);
+ mq = genradix_ptr(&q->table, qid);
+ if (mq)
+ __bch2_quota_get(qdq, mq);
+ mutex_unlock(&q->lock);
+
+ return 0;
+}
+
+static int bch2_get_next_quota(struct super_block *sb, struct kqid *kqid,
+ struct qc_dqblk *qdq)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct bch_memquota_type *q = &c->quotas[kqid->type];
+ qid_t qid = from_kqid(&init_user_ns, *kqid);
+ struct genradix_iter iter;
+ struct bch_memquota *mq;
+ int ret = 0;
+
+ mutex_lock(&q->lock);
+
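+ /*
+ * genradix hands back zero-filled slots for entries that were never
+ * populated; compare against ZERO_PAGE to find the next quota entry
+ * that isn't all zeroes:
+ */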
+ genradix_for_each_from(&q->table, iter, mq, qid)
+ if (memcmp(mq, page_address(ZERO_PAGE(0)), sizeof(*mq))) {
+ __bch2_quota_get(qdq, mq);
+ *kqid = make_kqid(current_user_ns(), kqid->type, iter.pos);
+ goto found;
+ }
+
+ ret = -ENOENT;
+found:
+ mutex_unlock(&q->lock);
+ return bch2_err_class(ret);
+}
+
+static int bch2_set_quota_trans(struct btree_trans *trans,
+ struct bkey_i_quota *new_quota,
+ struct qc_dqblk *qdq)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ ret = bkey_err(k);
+ if (unlikely(ret))
+ return ret;
+
+ if (k.k->type == KEY_TYPE_quota)
+ new_quota->v = *bkey_s_c_to_quota(k).v;
+
+ if (qdq->d_fieldmask & QC_SPC_SOFT)
+ new_quota->v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
+ if (qdq->d_fieldmask & QC_SPC_HARD)
+ new_quota->v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);
+
+ if (qdq->d_fieldmask & QC_INO_SOFT)
+ new_quota->v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
+ if (qdq->d_fieldmask & QC_INO_HARD)
+ new_quota->v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
+
+ ret = bch2_trans_update(trans, &iter, &new_quota->k_i, 0);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int bch2_set_quota(struct super_block *sb, struct kqid qid,
+ struct qc_dqblk *qdq)
+{
+ struct bch_fs *c = sb->s_fs_info;
+ struct bkey_i_quota new_quota;
+ int ret;
+
+ if (0) {
+ struct printbuf buf = PRINTBUF;
+
+ qc_dqblk_to_text(&buf, qdq);
+ pr_info("setting:\n%s", buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ if (sb->s_flags & SB_RDONLY)
+ return -EROFS;
+
+ bkey_quota_init(&new_quota.k_i);
+ new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid));
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_set_quota_trans(trans, &new_quota, qdq)) ?:
+ __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);
+
+ return bch2_err_class(ret);
+}
+
+const struct quotactl_ops bch2_quotactl_operations = {
+ .quota_enable = bch2_quota_enable,
+ .quota_disable = bch2_quota_disable,
+ .rm_xquota = bch2_quota_remove,
+
+ .get_state = bch2_quota_get_state,
+ .set_info = bch2_quota_set_info,
+
+ .get_dqblk = bch2_get_quota,
+ .get_nextdqblk = bch2_get_next_quota,
+ .set_dqblk = bch2_set_quota,
+};
+
+#endif /* CONFIG_BCACHEFS_QUOTA */
diff --git a/fs/bcachefs/quota.h b/fs/bcachefs/quota.h
new file mode 100644
index 000000000000..2f463874a362
--- /dev/null
+++ b/fs/bcachefs/quota.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_QUOTA_H
+#define _BCACHEFS_QUOTA_H
+
+#include "inode.h"
+#include "quota_types.h"
+
+enum bkey_invalid_flags;
+extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
+
+int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_quota ((struct bkey_ops) { \
+ .key_invalid = bch2_quota_invalid, \
+ .val_to_text = bch2_quota_to_text, \
+ .min_val_size = 32, \
+})
+
+static inline struct bch_qid bch_qid(struct bch_inode_unpacked *u)
+{
+ return (struct bch_qid) {
+ .q[QTYP_USR] = u->bi_uid,
+ .q[QTYP_GRP] = u->bi_gid,
+ .q[QTYP_PRJ] = u->bi_project ? u->bi_project - 1 : 0,
+ };
+}
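+
+/*
+ * (Editorial note) bi_project appears to be stored biased by one, with zero
+ * meaning "no project" - hence the subtraction above when mapping it to a
+ * quota id.
+ */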
+
+static inline unsigned enabled_qtypes(struct bch_fs *c)
+{
+ return ((c->opts.usrquota << QTYP_USR)|
+ (c->opts.grpquota << QTYP_GRP)|
+ (c->opts.prjquota << QTYP_PRJ));
+}
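+
+/*
+ * Example (editorial sketch): mounting with usrquota and prjquota but not
+ * grpquota gives enabled_qtypes() == (1 << QTYP_USR)|(1 << QTYP_PRJ).
+ */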
+
+#ifdef CONFIG_BCACHEFS_QUOTA
+
+int bch2_quota_acct(struct bch_fs *, struct bch_qid, enum quota_counters,
+ s64, enum quota_acct_mode);
+
+int bch2_quota_transfer(struct bch_fs *, unsigned, struct bch_qid,
+ struct bch_qid, u64, enum quota_acct_mode);
+
+void bch2_fs_quota_exit(struct bch_fs *);
+void bch2_fs_quota_init(struct bch_fs *);
+int bch2_fs_quota_read(struct bch_fs *);
+
+extern const struct quotactl_ops bch2_quotactl_operations;
+
+#else
+
+static inline int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
+ enum quota_counters counter, s64 v,
+ enum quota_acct_mode mode)
+{
+ return 0;
+}
+
+static inline int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
+ struct bch_qid dst,
+ struct bch_qid src, u64 space,
+ enum quota_acct_mode mode)
+{
+ return 0;
+}
+
+static inline void bch2_fs_quota_exit(struct bch_fs *c) {}
+static inline void bch2_fs_quota_init(struct bch_fs *c) {}
+static inline int bch2_fs_quota_read(struct bch_fs *c) { return 0; }
+
+#endif
+
+#endif /* _BCACHEFS_QUOTA_H */
diff --git a/fs/bcachefs/quota_types.h b/fs/bcachefs/quota_types.h
new file mode 100644
index 000000000000..6a136083d389
--- /dev/null
+++ b/fs/bcachefs/quota_types.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_QUOTA_TYPES_H
+#define _BCACHEFS_QUOTA_TYPES_H
+
+#include <linux/generic-radix-tree.h>
+
+struct bch_qid {
+ u32 q[QTYP_NR];
+};
+
+enum quota_acct_mode {
+ KEY_TYPE_QUOTA_PREALLOC,
+ KEY_TYPE_QUOTA_WARN,
+ KEY_TYPE_QUOTA_NOCHECK,
+};
+
+struct memquota_counter {
+ u64 v;
+ u64 hardlimit;
+ u64 softlimit;
+ s64 timer;
+ int warns;
+ int warning_issued;
+};
+
+struct bch_memquota {
+ struct memquota_counter c[Q_COUNTERS];
+};
+
+typedef GENRADIX(struct bch_memquota) bch_memquota_table;
+
+struct quota_limit {
+ u32 timelimit;
+ u32 warnlimit;
+};
+
+struct bch_memquota_type {
+ struct quota_limit limits[Q_COUNTERS];
+ bch_memquota_table table;
+ struct mutex lock;
+};
+
+#endif /* _BCACHEFS_QUOTA_TYPES_H */
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
new file mode 100644
index 000000000000..568f1e8e7507
--- /dev/null
+++ b/fs/bcachefs/rebalance.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_foreground.h"
+#include "btree_iter.h"
+#include "buckets.h"
+#include "clock.h"
+#include "compress.h"
+#include "disk_groups.h"
+#include "errcode.h"
+#include "move.h"
+#include "rebalance.h"
+#include "super-io.h"
+#include "trace.h"
+
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/sched/cputime.h>
+
+/*
+ * Check if an extent should be moved:
+ * returns true - with data_opts->rewrite_ptrs indicating which pointers need
+ * rewriting - if the extent should be moved, false otherwise
+ */
+static bool rebalance_pred(struct bch_fs *c, void *arg,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts,
+ struct data_update_opts *data_opts)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ unsigned i;
+
+ data_opts->rewrite_ptrs = 0;
+ data_opts->target = io_opts->background_target;
+ data_opts->extra_replicas = 0;
+ data_opts->btree_insert_flags = 0;
+
+ if (io_opts->background_compression &&
+ !bch2_bkey_is_incompressible(k)) {
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ i = 0;
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if (!p.ptr.cached &&
+ p.crc.compression_type !=
+ bch2_compression_opt_to_type(io_opts->background_compression))
+ data_opts->rewrite_ptrs |= 1U << i;
+ i++;
+ }
+ }
+
+ if (io_opts->background_target) {
+ const struct bch_extent_ptr *ptr;
+
+ i = 0;
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (!ptr->cached &&
+ !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
+ bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
+ data_opts->rewrite_ptrs |= 1U << i;
+ i++;
+ }
+ }
+
+ return data_opts->rewrite_ptrs != 0;
+}
+
+void bch2_rebalance_add_key(struct bch_fs *c,
+ struct bkey_s_c k,
+ struct bch_io_opts *io_opts)
+{
+ struct data_update_opts update_opts = { 0 };
+ struct bkey_ptrs_c ptrs;
+ const struct bch_extent_ptr *ptr;
+ unsigned i;
+
+ if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
+ return;
+
+ i = 0;
+ ptrs = bch2_bkey_ptrs_c(k);
+ bkey_for_each_ptr(ptrs, ptr) {
+ if ((1U << i) & update_opts.rewrite_ptrs)
+ if (atomic64_add_return(k.k->size,
+ &bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
+ k.k->size)
+ rebalance_wakeup(c);
+ i++;
+ }
+}
+
+void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
+{
+ if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
+ sectors)
+ rebalance_wakeup(c);
+}
+
+struct rebalance_work {
+ int dev_most_full_idx;
+ unsigned dev_most_full_percent;
+ u64 dev_most_full_work;
+ u64 dev_most_full_capacity;
+ u64 total_work;
+};
+
+static void rebalance_work_accumulate(struct rebalance_work *w,
+ u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
+{
+ unsigned percent_full;
+ u64 work = dev_work + unknown_dev;
+
+ /* avoid divide by 0 */
+ if (!capacity)
+ return;
+
+ if (work < dev_work || work < unknown_dev)
+ work = U64_MAX;
+ work = min(work, capacity);
+
+ percent_full = div64_u64(work * 100, capacity);
+
+ if (percent_full >= w->dev_most_full_percent) {
+ w->dev_most_full_idx = idx;
+ w->dev_most_full_percent = percent_full;
+ w->dev_most_full_work = work;
+ w->dev_most_full_capacity = capacity;
+ }
+
+ if (w->total_work + dev_work >= w->total_work &&
+ w->total_work + dev_work >= dev_work)
+ w->total_work += dev_work;
+}
+
+static struct rebalance_work rebalance_work(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ struct rebalance_work ret = { .dev_most_full_idx = -1 };
+ u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
+ unsigned i;
+
+ for_each_online_member(ca, c, i)
+ rebalance_work_accumulate(&ret,
+ atomic64_read(&ca->rebalance_work),
+ unknown_dev,
+ bucket_to_sector(ca, ca->mi.nbuckets -
+ ca->mi.first_bucket),
+ i);
+
+ rebalance_work_accumulate(&ret,
+ unknown_dev, 0, c->capacity, -1);
+
+ return ret;
+}
+
+static void rebalance_work_reset(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ for_each_online_member(ca, c, i)
+ atomic64_set(&ca->rebalance_work, 0);
+
+ atomic64_set(&c->rebalance.work_unknown_dev, 0);
+}
+
+static unsigned long curr_cputime(void)
+{
+ u64 utime, stime;
+
+ task_cputime_adjusted(current, &utime, &stime);
+ return nsecs_to_jiffies(utime + stime);
+}
+
+static int bch2_rebalance_thread(void *arg)
+{
+ struct bch_fs *c = arg;
+ struct bch_fs_rebalance *r = &c->rebalance;
+ struct io_clock *clock = &c->io_clock[WRITE];
+ struct rebalance_work w, p;
+ struct bch_move_stats move_stats;
+ unsigned long start, prev_start;
+ unsigned long prev_run_time, prev_run_cputime;
+ unsigned long cputime, prev_cputime;
+ u64 io_start;
+ long throttle;
+
+ set_freezable();
+
+ io_start = atomic64_read(&clock->now);
+ p = rebalance_work(c);
+ prev_start = jiffies;
+ prev_cputime = curr_cputime();
+
+ bch2_move_stats_init(&move_stats, "rebalance");
+ while (!kthread_wait_freezable(r->enabled)) {
+ cond_resched();
+
+ start = jiffies;
+ cputime = curr_cputime();
+
+ prev_run_time = start - prev_start;
+ prev_run_cputime = cputime - prev_cputime;
+
+ w = rebalance_work(c);
+ BUG_ON(!w.dev_most_full_capacity);
+
+ if (!w.total_work) {
+ r->state = REBALANCE_WAITING;
+ kthread_wait_freezable(rebalance_work(c).total_work);
+ continue;
+ }
+
+ /*
+ * If there isn't much work to do, throttle cpu usage:
+ */
+ throttle = prev_run_cputime * 100 /
+ max(1U, w.dev_most_full_percent) -
+ prev_run_time;
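+ /*
+ * (Editorial worked example) 1s of cputime over a 10s window with
+ * the fullest device 5% full targets 1s * 100 / 5 = 20s of wall
+ * time per second of cpu time, so throttle = 20s - 10s = 10s.
+ */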
+
+ if (w.dev_most_full_percent < 20 && throttle > 0) {
+ r->throttled_until_iotime = io_start +
+ div_u64(w.dev_most_full_capacity *
+ (20 - w.dev_most_full_percent),
+ 50);
+
+ if (atomic64_read(&clock->now) + clock->max_slop <
+ r->throttled_until_iotime) {
+ r->throttled_until_cputime = start + throttle;
+ r->state = REBALANCE_THROTTLED;
+
+ bch2_kthread_io_clock_wait(clock,
+ r->throttled_until_iotime,
+ throttle);
+ continue;
+ }
+ }
+
+ /* minimum 1 MB/sec: */
+ r->pd.rate.rate =
+ max_t(u64, 1 << 11,
+ r->pd.rate.rate *
+ max(p.dev_most_full_percent, 1U) /
+ max(w.dev_most_full_percent, 1U));
+
+ io_start = atomic64_read(&clock->now);
+ p = w;
+ prev_start = start;
+ prev_cputime = cputime;
+
+ r->state = REBALANCE_RUNNING;
+ memset(&move_stats, 0, sizeof(move_stats));
+ rebalance_work_reset(c);
+
+ bch2_move_data(c,
+ 0, POS_MIN,
+ BTREE_ID_NR, POS_MAX,
+ /* ratelimiting disabled for now */
+ NULL, /* &r->pd.rate, */
+ &move_stats,
+ writepoint_ptr(&c->rebalance_write_point),
+ true,
+ rebalance_pred, NULL);
+ }
+
+ return 0;
+}
+
+void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct bch_fs_rebalance *r = &c->rebalance;
+ struct rebalance_work w = rebalance_work(c);
+
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 20);
+
+ prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
+ prt_tab(out);
+
+ prt_human_readable_u64(out, w.dev_most_full_work << 9);
+ prt_printf(out, "/");
+ prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
+ prt_newline(out);
+
+ prt_printf(out, "total work:");
+ prt_tab(out);
+
+ prt_human_readable_u64(out, w.total_work << 9);
+ prt_printf(out, "/");
+ prt_human_readable_u64(out, c->capacity << 9);
+ prt_newline(out);
+
+ prt_printf(out, "rate:");
+ prt_tab(out);
+ prt_printf(out, "%u", r->pd.rate.rate);
+ prt_newline(out);
+
+ switch (r->state) {
+ case REBALANCE_WAITING:
+ prt_printf(out, "waiting");
+ break;
+ case REBALANCE_THROTTLED:
+ prt_printf(out, "throttled for %lu sec or ",
+ (r->throttled_until_cputime - jiffies) / HZ);
+ prt_human_readable_u64(out,
+ (r->throttled_until_iotime -
+ atomic64_read(&c->io_clock[WRITE].now)) << 9);
+ prt_printf(out, " io");
+ break;
+ case REBALANCE_RUNNING:
+ prt_printf(out, "running");
+ break;
+ }
+ prt_newline(out);
+}
+
+void bch2_rebalance_stop(struct bch_fs *c)
+{
+ struct task_struct *p;
+
+ c->rebalance.pd.rate.rate = UINT_MAX;
+ bch2_ratelimit_reset(&c->rebalance.pd.rate);
+
+ p = rcu_dereference_protected(c->rebalance.thread, 1);
+ c->rebalance.thread = NULL;
+
+ if (p) {
+ /* for synchronizing with rebalance_wakeup() */
+ synchronize_rcu();
+
+ kthread_stop(p);
+ put_task_struct(p);
+ }
+}
+
+int bch2_rebalance_start(struct bch_fs *c)
+{
+ struct task_struct *p;
+ int ret;
+
+ if (c->rebalance.thread)
+ return 0;
+
+ if (c->opts.nochanges)
+ return 0;
+
+ p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
+ ret = PTR_ERR_OR_ZERO(p);
+ if (ret) {
+ bch_err_msg(c, ret, "creating rebalance thread");
+ return ret;
+ }
+
+ get_task_struct(p);
+ rcu_assign_pointer(c->rebalance.thread, p);
+ wake_up_process(p);
+ return 0;
+}
+
+void bch2_fs_rebalance_init(struct bch_fs *c)
+{
+ bch2_pd_controller_init(&c->rebalance.pd);
+
+ atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
+}
diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h
new file mode 100644
index 000000000000..7ade0bb81cce
--- /dev/null
+++ b/fs/bcachefs/rebalance.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REBALANCE_H
+#define _BCACHEFS_REBALANCE_H
+
+#include "rebalance_types.h"
+
+static inline void rebalance_wakeup(struct bch_fs *c)
+{
+ struct task_struct *p;
+
+ rcu_read_lock();
+ p = rcu_dereference(c->rebalance.thread);
+ if (p)
+ wake_up_process(p);
+ rcu_read_unlock();
+}
+
+void bch2_rebalance_add_key(struct bch_fs *, struct bkey_s_c,
+ struct bch_io_opts *);
+void bch2_rebalance_add_work(struct bch_fs *, u64);
+
+void bch2_rebalance_work_to_text(struct printbuf *, struct bch_fs *);
+
+void bch2_rebalance_stop(struct bch_fs *);
+int bch2_rebalance_start(struct bch_fs *);
+void bch2_fs_rebalance_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_REBALANCE_H */
diff --git a/fs/bcachefs/rebalance_types.h b/fs/bcachefs/rebalance_types.h
new file mode 100644
index 000000000000..7462a92e9598
--- /dev/null
+++ b/fs/bcachefs/rebalance_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REBALANCE_TYPES_H
+#define _BCACHEFS_REBALANCE_TYPES_H
+
+#include "move_types.h"
+
+enum rebalance_state {
+ REBALANCE_WAITING,
+ REBALANCE_THROTTLED,
+ REBALANCE_RUNNING,
+};
+
+struct bch_fs_rebalance {
+ struct task_struct __rcu *thread;
+ struct bch_pd_controller pd;
+
+ atomic64_t work_unknown_dev;
+
+ enum rebalance_state state;
+ u64 throttled_until_iotime;
+ unsigned long throttled_until_cputime;
+
+ unsigned enabled:1;
+};
+
+#endif /* _BCACHEFS_REBALANCE_TYPES_H */
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
new file mode 100644
index 000000000000..4cd660650e5b
--- /dev/null
+++ b/fs/bcachefs/recovery.c
@@ -0,0 +1,1049 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "backpointers.h"
+#include "bkey_buf.h"
+#include "alloc_background.h"
+#include "btree_gc.h"
+#include "btree_journal_iter.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_io.h"
+#include "buckets.h"
+#include "dirent.h"
+#include "ec.h"
+#include "errcode.h"
+#include "error.h"
+#include "fs-common.h"
+#include "fsck.h"
+#include "journal_io.h"
+#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
+#include "lru.h"
+#include "logged_ops.h"
+#include "move.h"
+#include "quota.h"
+#include "recovery.h"
+#include "replicas.h"
+#include "sb-clean.h"
+#include "snapshot.h"
+#include "subvolume.h"
+#include "super-io.h"
+
+#include <linux/sort.h>
+#include <linux/stat.h>
+
+#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
+
+static bool btree_id_is_alloc(enum btree_id id)
+{
+ switch (id) {
+ case BTREE_ID_alloc:
+ case BTREE_ID_backpointers:
+ case BTREE_ID_need_discard:
+ case BTREE_ID_freespace:
+ case BTREE_ID_bucket_gens:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* for -o reconstruct_alloc: */
+static void drop_alloc_keys(struct journal_keys *keys)
+{
+ size_t src, dst;
+
+ for (src = 0, dst = 0; src < keys->nr; src++)
+ if (!btree_id_is_alloc(keys->d[src].btree_id))
+ keys->d[dst++] = keys->d[src];
+
+ keys->nr = dst;
+}
+
+/*
+ * Btree node pointers have a field to stash a pointer to the in memory btree
+ * node; we need to zero out this field when reading in btree nodes, or when
+ * reading in keys from the journal:
+ */
+static void zero_out_btree_mem_ptr(struct journal_keys *keys)
+{
+ struct journal_key *i;
+
+ for (i = keys->d; i < keys->d + keys->nr; i++)
+ if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
+ bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
+}
+
+/* journal replay: */
+
+static void replay_now_at(struct journal *j, u64 seq)
+{
+ BUG_ON(seq < j->replay_journal_seq);
+
+ seq = min(seq, j->replay_journal_seq_end);
+
+ while (j->replay_journal_seq < seq)
+ bch2_journal_pin_put(j, j->replay_journal_seq++);
+}
+
+static int bch2_journal_replay_key(struct btree_trans *trans,
+ struct journal_key *k)
+{
+ struct btree_iter iter;
+ unsigned iter_flags =
+ BTREE_ITER_INTENT|
+ BTREE_ITER_NOT_EXTENTS;
+ unsigned update_flags = BTREE_TRIGGER_NORUN;
+ int ret;
+
+ /*
+ * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
+ * keep the key cache coherent with the underlying btree. Nothing
+ * besides the allocator is doing updates yet so we don't need key cache
+ * coherency for non-alloc btrees, and key cache fills for snapshots
+ * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
+ * the snapshots recovery pass runs.
+ */
+ if (!k->level && k->btree_id == BTREE_ID_alloc)
+ iter_flags |= BTREE_ITER_CACHED;
+ else
+ update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;
+
+ bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
+ BTREE_MAX_DEPTH, k->level,
+ iter_flags);
+ ret = bch2_btree_iter_traverse(&iter);
+ if (ret)
+ goto out;
+
+ /* Must be checked with btree locked: */
+ if (k->overwritten)
+ goto out;
+
+ ret = bch2_trans_update(trans, &iter, k->k, update_flags);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int journal_sort_seq_cmp(const void *_l, const void *_r)
+{
+ const struct journal_key *l = *((const struct journal_key **)_l);
+ const struct journal_key *r = *((const struct journal_key **)_r);
+
+ return cmp_int(l->journal_seq, r->journal_seq);
+}
+
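+/*
+ * Replay keys from the journal into the btrees: keys are sorted by journal
+ * sequence number so that pins can be dropped incrementally, and each key is
+ * replayed in its own transaction:
+ */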
+static int bch2_journal_replay(struct bch_fs *c)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ struct journal_key **keys_sorted, *k;
+ struct journal *j = &c->journal;
+ u64 start_seq = c->journal_replay_seq_start;
+	u64 end_seq = c->journal_replay_seq_end;
+ size_t i;
+ int ret;
+
+ move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
+ keys->gap = keys->nr;
+
+ keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
+ if (!keys_sorted)
+ return -BCH_ERR_ENOMEM_journal_replay;
+
+ for (i = 0; i < keys->nr; i++)
+ keys_sorted[i] = &keys->d[i];
+
+ sort(keys_sorted, keys->nr,
+ sizeof(keys_sorted[0]),
+ journal_sort_seq_cmp, NULL);
+
+ if (keys->nr) {
+ ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
+ keys->nr, start_seq, end_seq);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < keys->nr; i++) {
+ k = keys_sorted[i];
+
+ cond_resched();
+
+ replay_now_at(j, k->journal_seq);
+
+ ret = bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOFAIL|
+ (!k->allocated
+ ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim
+ : 0),
+ bch2_journal_replay_key(trans, k));
+ if (ret) {
+ bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
+ bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret));
+ goto err;
+ }
+ }
+
+ replay_now_at(j, j->replay_journal_seq_end);
+ j->replay_journal_seq = 0;
+
+ bch2_journal_set_replay_done(j);
+ bch2_journal_flush_all_pins(j);
+ ret = bch2_journal_error(j);
+
+ if (keys->nr && !ret)
+ bch2_journal_log_msg(c, "journal replay finished");
+err:
+ kvfree(keys_sorted);
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/* journal replay early: */
+
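+/*
+ * Process a single journal (or superblock clean section) entry carrying
+ * filesystem state - btree roots, usage counters, blacklisted sequence
+ * numbers, IO clocks - before key replay proper:
+ */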
+static int journal_replay_entry_early(struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ int ret = 0;
+
+ switch (entry->type) {
+ case BCH_JSET_ENTRY_btree_root: {
+ struct btree_root *r;
+
+ while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
+ ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
+ if (ret)
+ return ret;
+ }
+
+ r = bch2_btree_id_root(c, entry->btree_id);
+
+ if (entry->u64s) {
+ r->level = entry->level;
+ bkey_copy(&r->key, &entry->start[0]);
+ r->error = 0;
+ } else {
+ r->error = -EIO;
+ }
+ r->alive = true;
+ break;
+ }
+ case BCH_JSET_ENTRY_usage: {
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+
+ switch (entry->btree_id) {
+ case BCH_FS_USAGE_reserved:
+ if (entry->level < BCH_REPLICAS_MAX)
+ c->usage_base->persistent_reserved[entry->level] =
+ le64_to_cpu(u->v);
+ break;
+ case BCH_FS_USAGE_inodes:
+ c->usage_base->nr_inodes = le64_to_cpu(u->v);
+ break;
+ case BCH_FS_USAGE_key_version:
+ atomic64_set(&c->key_version,
+ le64_to_cpu(u->v));
+ break;
+ }
+
+ break;
+ }
+ case BCH_JSET_ENTRY_data_usage: {
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+
+ ret = bch2_replicas_set_usage(c, &u->r,
+ le64_to_cpu(u->v));
+ break;
+ }
+ case BCH_JSET_ENTRY_dev_usage: {
+ struct jset_entry_dev_usage *u =
+ container_of(entry, struct jset_entry_dev_usage, entry);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
+ unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
+
+ ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);
+
+ for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
+ ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
+ ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
+ ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
+ }
+
+ break;
+ }
+ case BCH_JSET_ENTRY_blacklist: {
+ struct jset_entry_blacklist *bl_entry =
+ container_of(entry, struct jset_entry_blacklist, entry);
+
+ ret = bch2_journal_seq_blacklist_add(c,
+ le64_to_cpu(bl_entry->seq),
+ le64_to_cpu(bl_entry->seq) + 1);
+ break;
+ }
+ case BCH_JSET_ENTRY_blacklist_v2: {
+ struct jset_entry_blacklist_v2 *bl_entry =
+ container_of(entry, struct jset_entry_blacklist_v2, entry);
+
+ ret = bch2_journal_seq_blacklist_add(c,
+ le64_to_cpu(bl_entry->start),
+ le64_to_cpu(bl_entry->end) + 1);
+ break;
+ }
+ case BCH_JSET_ENTRY_clock: {
+ struct jset_entry_clock *clock =
+ container_of(entry, struct jset_entry_clock, entry);
+
+ atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
+ }
+ }
+
+ return ret;
+}
+
+static int journal_replay_early(struct bch_fs *c,
+ struct bch_sb_field_clean *clean)
+{
+ struct jset_entry *entry;
+ int ret;
+
+ if (clean) {
+ for (entry = clean->start;
+ entry != vstruct_end(&clean->field);
+ entry = vstruct_next(entry)) {
+ ret = journal_replay_entry_early(c, entry);
+ if (ret)
+ return ret;
+ }
+ } else {
+ struct genradix_iter iter;
+ struct journal_replay *i, **_i;
+
+ genradix_for_each(&c->journal_entries, iter, _i) {
+ i = *_i;
+
+ if (!i || i->ignore)
+ continue;
+
+ vstruct_for_each(&i->j, entry) {
+ ret = journal_replay_entry_early(c, entry);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ bch2_fs_usage_initialize(c);
+
+ return 0;
+}
+
+/* sb clean section: */
+
+static int read_btree_roots(struct bch_fs *c)
+{
+ unsigned i;
+ int ret = 0;
+
+ for (i = 0; i < btree_id_nr_alive(c); i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (!r->alive)
+ continue;
+
+ if (btree_id_is_alloc(i) &&
+ c->opts.reconstruct_alloc) {
+ c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
+ continue;
+ }
+
+ if (r->error) {
+ __fsck_err(c, btree_id_is_alloc(i)
+ ? FSCK_CAN_IGNORE : 0,
+ "invalid btree root %s",
+ bch2_btree_ids[i]);
+ if (i == BTREE_ID_alloc)
+ c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
+ }
+
+ ret = bch2_btree_root_read(c, i, &r->key, r->level);
+ if (ret) {
+ fsck_err(c,
+ "error reading btree root %s",
+ bch2_btree_ids[i]);
+ if (btree_id_is_alloc(i))
+ c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
+ ret = 0;
+ }
+ }
+
+ for (i = 0; i < BTREE_ID_NR; i++) {
+ struct btree_root *r = bch2_btree_id_root(c, i);
+
+ if (!r->b) {
+ r->alive = false;
+ r->level = 0;
+ bch2_btree_root_alloc(c, i);
+ }
+ }
+fsck_err:
+ return ret;
+}
+
+static int bch2_initialize_subvolumes(struct bch_fs *c)
+{
+ struct bkey_i_snapshot_tree root_tree;
+ struct bkey_i_snapshot root_snapshot;
+ struct bkey_i_subvolume root_volume;
+ int ret;
+
+ bkey_snapshot_tree_init(&root_tree.k_i);
+ root_tree.k.p.offset = 1;
+ root_tree.v.master_subvol = cpu_to_le32(1);
+ root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
+
+ bkey_snapshot_init(&root_snapshot.k_i);
+ root_snapshot.k.p.offset = U32_MAX;
+ root_snapshot.v.flags = 0;
+ root_snapshot.v.parent = 0;
+ root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
+ root_snapshot.v.tree = cpu_to_le32(1);
+ SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
+
+ bkey_subvolume_init(&root_volume.k_i);
+ root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
+ root_volume.v.flags = 0;
+ root_volume.v.snapshot = cpu_to_le32(U32_MAX);
+ root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
+
+ ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ if (!bkey_is_inode(k.k)) {
+ bch_err(trans->c, "root inode not found");
+ ret = -BCH_ERR_ENOENT_inode;
+ goto err;
+ }
+
+ ret = bch2_inode_unpack(k, &inode);
+ BUG_ON(ret);
+
+ inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
+
+ ret = bch2_inode_write(trans, &iter, &inode);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/* set bi_subvol on root inode */
+noinline_for_stack
+static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
+{
+ int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
+ __bch2_fs_upgrade_for_subvolumes(trans));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
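+/*
+ * The name table here, the enum in recovery_types.h, and recovery_pass_fns[]
+ * below are all generated from the same BCH_RECOVERY_PASSES() x-macro, so
+ * they stay in sync by construction:
+ */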
+const char * const bch2_recovery_passes[] = {
+#define x(_fn, _when) #_fn,
+ BCH_RECOVERY_PASSES()
+#undef x
+ NULL
+};
+
+static int bch2_check_allocations(struct bch_fs *c)
+{
+ return bch2_gc(c, true, c->opts.norecovery);
+}
+
+static int bch2_set_may_go_rw(struct bch_fs *c)
+{
+ set_bit(BCH_FS_MAY_GO_RW, &c->flags);
+ return 0;
+}
+
+struct recovery_pass_fn {
+ int (*fn)(struct bch_fs *);
+ unsigned when;
+};
+
+static struct recovery_pass_fn recovery_pass_fns[] = {
+#define x(_fn, _when) { .fn = bch2_##_fn, .when = _when },
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
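+/*
+ * Decide whether to bump the on-disk format version, and schedule whatever
+ * recovery passes that upgrade requires:
+ */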
+static void check_version_upgrade(struct bch_fs *c)
+{
+ unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
+ unsigned latest_version = bcachefs_metadata_version_current;
+ unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
+ unsigned new_version = 0;
+ u64 recovery_passes;
+
+ if (old_version < bcachefs_metadata_required_upgrade_below) {
+ if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
+ latest_compatible < bcachefs_metadata_required_upgrade_below)
+ new_version = latest_version;
+ else
+ new_version = latest_compatible;
+ } else {
+ switch (c->opts.version_upgrade) {
+ case BCH_VERSION_UPGRADE_compatible:
+ new_version = latest_compatible;
+ break;
+ case BCH_VERSION_UPGRADE_incompatible:
+ new_version = latest_version;
+ break;
+ case BCH_VERSION_UPGRADE_none:
+ new_version = old_version;
+ break;
+ }
+ }
+
+ if (new_version > old_version) {
+ struct printbuf buf = PRINTBUF;
+
+ if (old_version < bcachefs_metadata_required_upgrade_below)
+ prt_str(&buf, "Version upgrade required:\n");
+
+ if (old_version != c->sb.version) {
+ prt_str(&buf, "Version upgrade from ");
+ bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
+ prt_str(&buf, " to ");
+ bch2_version_to_text(&buf, c->sb.version);
+ prt_str(&buf, " incomplete\n");
+ }
+
+ prt_printf(&buf, "Doing %s version upgrade from ",
+ BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
+ ? "incompatible" : "compatible");
+ bch2_version_to_text(&buf, old_version);
+ prt_str(&buf, " to ");
+ bch2_version_to_text(&buf, new_version);
+ prt_newline(&buf);
+
+ recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version);
+ if (recovery_passes) {
+ if ((recovery_passes & RECOVERY_PASS_ALL_FSCK) == RECOVERY_PASS_ALL_FSCK)
+ prt_str(&buf, "fsck required");
+ else {
+ prt_str(&buf, "running recovery passes: ");
+ prt_bitflags(&buf, bch2_recovery_passes, recovery_passes);
+ }
+
+ c->recovery_passes_explicit |= recovery_passes;
+ c->opts.fix_errors = FSCK_FIX_yes;
+ }
+
+ bch_info(c, "%s", buf.buf);
+
+ mutex_lock(&c->sb_lock);
+ bch2_sb_upgrade(c, new_version);
+ mutex_unlock(&c->sb_lock);
+
+ printbuf_exit(&buf);
+ }
+}
+
+u64 bch2_fsck_recovery_passes(void)
+{
+ u64 ret = 0;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
+ if (recovery_pass_fns[i].when & PASS_FSCK)
+ ret |= BIT_ULL(i);
+ return ret;
+}
+
+static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+	struct recovery_pass_fn *p = recovery_pass_fns + pass;
+
+ if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
+ return false;
+ if (c->recovery_passes_explicit & BIT_ULL(pass))
+ return true;
+ if ((p->when & PASS_FSCK) && c->opts.fsck)
+ return true;
+ if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
+ return true;
+ if (p->when & PASS_ALWAYS)
+ return true;
+ return false;
+}
+
+static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+ int ret;
+
+ c->curr_recovery_pass = pass;
+
+ if (should_run_recovery_pass(c, pass)) {
+ struct recovery_pass_fn *p = recovery_pass_fns + pass;
+
+ if (!(p->when & PASS_SILENT))
+ printk(KERN_INFO bch2_log_msg(c, "%s..."),
+ bch2_recovery_passes[pass]);
+ ret = p->fn(c);
+ if (ret)
+ return ret;
+ if (!(p->when & PASS_SILENT))
+ printk(KERN_CONT " done\n");
+
+ c->recovery_passes_complete |= BIT_ULL(pass);
+ }
+
+ return 0;
+}
+
+static int bch2_run_recovery_passes(struct bch_fs *c)
+{
+ int ret = 0;
+
+ while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
+ ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
+ if (bch2_err_matches(ret, BCH_ERR_restart_recovery))
+ continue;
+ if (ret)
+ break;
+ c->curr_recovery_pass++;
+ }
+
+ return ret;
+}
+
+int bch2_fs_recovery(struct bch_fs *c)
+{
+ struct bch_sb_field_clean *clean = NULL;
+ struct jset *last_journal_entry = NULL;
+ u64 last_seq = 0, blacklist_seq, journal_seq;
+ bool write_sb = false;
+ int ret = 0;
+
+ if (c->sb.clean) {
+ clean = bch2_read_superblock_clean(c);
+ ret = PTR_ERR_OR_ZERO(clean);
+ if (ret)
+ goto err;
+
+ bch_info(c, "recovering from clean shutdown, journal seq %llu",
+ le64_to_cpu(clean->journal_seq));
+ } else {
+ bch_info(c, "recovering from unclean shutdown");
+ }
+
+ if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
+ bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (!c->sb.clean &&
+ !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
+ bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery))
+ check_version_upgrade(c);
+
+ if (c->opts.fsck && c->opts.norecovery) {
+ bch_err(c, "cannot select both norecovery and fsck");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = bch2_blacklist_table_initialize(c);
+ if (ret) {
+ bch_err(c, "error initializing blacklist table");
+ goto err;
+ }
+
+ if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
+ struct genradix_iter iter;
+ struct journal_replay **i;
+
+ bch_verbose(c, "starting journal read");
+ ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
+ if (ret)
+ goto err;
+
+ /*
+ * note: cmd_list_journal needs the blacklist table fully up to date so
+ * it can asterisk ignored journal entries:
+ */
+ if (c->opts.read_journal_only)
+ goto out;
+
+ genradix_for_each_reverse(&c->journal_entries, iter, i)
+ if (*i && !(*i)->ignore) {
+ last_journal_entry = &(*i)->j;
+ break;
+ }
+
+ if (mustfix_fsck_err_on(c->sb.clean &&
+ last_journal_entry &&
+ !journal_entry_empty(last_journal_entry), c,
+ "filesystem marked clean but journal not empty")) {
+ c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
+ SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
+ c->sb.clean = false;
+ }
+
+ if (!last_journal_entry) {
+ fsck_err_on(!c->sb.clean, c, "no journal entries found");
+ if (clean)
+ goto use_clean;
+
+ genradix_for_each_reverse(&c->journal_entries, iter, i)
+ if (*i) {
+ last_journal_entry = &(*i)->j;
+ (*i)->ignore = false;
+ break;
+ }
+ }
+
+ ret = bch2_journal_keys_sort(c);
+ if (ret)
+ goto err;
+
+ if (c->sb.clean && last_journal_entry) {
+ ret = bch2_verify_superblock_clean(c, &clean,
+ last_journal_entry);
+ if (ret)
+ goto err;
+ }
+ } else {
+use_clean:
+ if (!clean) {
+ bch_err(c, "no superblock clean section found");
+ ret = -BCH_ERR_fsck_repair_impossible;
+ goto err;
+ }
+ blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
+ }
+
+ c->journal_replay_seq_start = last_seq;
+ c->journal_replay_seq_end = blacklist_seq - 1;
+
+ if (c->opts.reconstruct_alloc) {
+ c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
+ drop_alloc_keys(&c->journal_keys);
+ }
+
+ zero_out_btree_mem_ptr(&c->journal_keys);
+
+ ret = journal_replay_early(c, clean);
+ if (ret)
+ goto err;
+
+	/*
+	 * After an unclean shutdown, skip and blacklist the next few journal
+	 * sequence numbers: they may have been referenced by btree writes that
+	 * happened before their corresponding journal writes, and those btree
+	 * writes need to be ignored:
+	 */
+ if (!c->sb.clean)
+ journal_seq += 8;
+
+ if (blacklist_seq != journal_seq) {
+ ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
+ blacklist_seq, journal_seq) ?:
+ bch2_journal_seq_blacklist_add(c,
+ blacklist_seq, journal_seq);
+ if (ret) {
+ bch_err(c, "error creating new journal seq blacklist entry");
+ goto err;
+ }
+ }
+
+ ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
+ journal_seq, last_seq, blacklist_seq - 1) ?:
+ bch2_fs_journal_start(&c->journal, journal_seq);
+ if (ret)
+ goto err;
+
+ if (c->opts.reconstruct_alloc)
+ bch2_journal_log_msg(c, "dropping alloc info");
+
+ /*
+ * Skip past versions that might have possibly been used (as nonces),
+ * but hadn't had their pointers written:
+ */
+ if (c->sb.encryption_type && !c->sb.clean)
+ atomic64_add(1 << 16, &c->key_version);
+
+ ret = read_btree_roots(c);
+ if (ret)
+ goto err;
+
+ if (c->opts.fsck &&
+ (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
+ BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+
+ ret = bch2_run_recovery_passes(c);
+ if (ret)
+ goto err;
+
+ /* If we fixed errors, verify that fs is actually clean now: */
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+ test_bit(BCH_FS_ERRORS_FIXED, &c->flags) &&
+ !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) &&
+ !test_bit(BCH_FS_ERROR, &c->flags)) {
+ bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
+ clear_bit(BCH_FS_ERRORS_FIXED, &c->flags);
+
+ c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
+
+ ret = bch2_run_recovery_passes(c);
+ if (ret)
+ goto err;
+
+ if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) ||
+ test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
+ bch_err(c, "Second fsck run was not clean");
+ set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
+ }
+
+ set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
+ }
+
+ if (enabled_qtypes(c)) {
+ bch_verbose(c, "reading quotas");
+ ret = bch2_fs_quota_read(c);
+ if (ret)
+ goto err;
+ bch_verbose(c, "quotas done");
+ }
+
+ mutex_lock(&c->sb_lock);
+ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) {
+ SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version);
+ write_sb = true;
+ }
+
+ if (!test_bit(BCH_FS_ERROR, &c->flags)) {
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
+ write_sb = true;
+ }
+
+ if (c->opts.fsck &&
+ !test_bit(BCH_FS_ERROR, &c->flags) &&
+ !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
+ SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
+ SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
+ write_sb = true;
+ }
+
+ if (write_sb)
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
+ c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
+ struct bch_move_stats stats;
+
+ bch2_move_stats_init(&stats, "recovery");
+
+ bch_info(c, "scanning for old btree nodes");
+ ret = bch2_fs_read_write(c) ?:
+ bch2_scan_old_btree_nodes(c, &stats);
+ if (ret)
+ goto err;
+ bch_info(c, "scanning for old btree nodes done");
+ }
+
+ if (c->journal_seq_blacklist_table &&
+ c->journal_seq_blacklist_table->nr > 128)
+ queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
+
+ ret = 0;
+out:
+ set_bit(BCH_FS_FSCK_DONE, &c->flags);
+ bch2_flush_fsck_errs(c);
+
+ if (!c->opts.keep_journal &&
+ test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
+ bch2_journal_keys_free(&c->journal_keys);
+ bch2_journal_entries_free(c);
+ }
+ kfree(clean);
+
+ if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
+ bch2_fs_read_write_early(c);
+ bch2_delete_dead_snapshots_async(c);
+ }
+
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+err:
+fsck_err:
+ bch2_fs_emergency_read_only(c);
+ goto out;
+}
+
+int bch2_fs_initialize(struct bch_fs *c)
+{
+ struct bch_inode_unpacked root_inode, lostfound_inode;
+ struct bkey_inode_buf packed_inode;
+ struct qstr lostfound = QSTR("lost+found");
+ struct bch_dev *ca;
+ unsigned i;
+ int ret;
+
+ bch_notice(c, "initializing new filesystem");
+
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
+
+ bch2_sb_maybe_downgrade(c);
+
+ if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
+ bch2_sb_upgrade(c, bcachefs_metadata_version_current);
+ SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+
+ c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
+ set_bit(BCH_FS_MAY_GO_RW, &c->flags);
+ set_bit(BCH_FS_FSCK_DONE, &c->flags);
+
+ for (i = 0; i < BTREE_ID_NR; i++)
+ bch2_btree_root_alloc(c, i);
+
+ for_each_online_member(ca, c, i)
+ bch2_dev_usage_init(ca);
+
+ for_each_online_member(ca, c, i) {
+ ret = bch2_dev_journal_alloc(ca);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ goto err;
+ }
+ }
+
+ /*
+ * journal_res_get() will crash if called before this has
+ * set up the journal.pin FIFO and journal.cur pointer:
+ */
+ bch2_fs_journal_start(&c->journal, 1);
+ bch2_journal_set_replay_done(&c->journal);
+
+ ret = bch2_fs_read_write_early(c);
+ if (ret)
+ goto err;
+
+ /*
+ * Write out the superblock and journal buckets, now that we can do
+ * btree updates
+ */
+ bch_verbose(c, "marking superblocks");
+ for_each_member_device(ca, c, i) {
+ ret = bch2_trans_mark_dev_sb(c, ca);
+ if (ret) {
+ percpu_ref_put(&ca->ref);
+ goto err;
+ }
+
+ ca->new_fs_bucket_idx = 0;
+ }
+
+ ret = bch2_fs_freespace_init(c);
+ if (ret)
+ goto err;
+
+ ret = bch2_initialize_subvolumes(c);
+ if (ret)
+ goto err;
+
+ bch_verbose(c, "reading snapshots table");
+ ret = bch2_snapshots_read(c);
+ if (ret)
+ goto err;
+ bch_verbose(c, "reading snapshots done");
+
+ bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
+ root_inode.bi_inum = BCACHEFS_ROOT_INO;
+ root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
+ bch2_inode_pack(&packed_inode, &root_inode);
+ packed_inode.inode.k.p.snapshot = U32_MAX;
+
+ ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
+ if (ret) {
+ bch_err_msg(c, ret, "creating root directory");
+ goto err;
+ }
+
+ bch2_inode_init_early(c, &lostfound_inode);
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_create_trans(trans,
+ BCACHEFS_ROOT_SUBVOL_INUM,
+ &root_inode, &lostfound_inode,
+ &lostfound,
+ 0, 0, S_IFDIR|0700, 0,
+ NULL, NULL, (subvol_inum) { 0 }, 0));
+ if (ret) {
+ bch_err_msg(c, ret, "creating lost+found");
+ goto err;
+ }
+
+ if (enabled_qtypes(c)) {
+ ret = bch2_fs_quota_read(c);
+ if (ret)
+ goto err;
+ }
+
+ ret = bch2_journal_flush(&c->journal);
+ if (ret) {
+ bch_err_msg(c, ret, "writing first journal entry");
+ goto err;
+ }
+
+ mutex_lock(&c->sb_lock);
+ SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
+ SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ return 0;
+err:
+	bch_err_fn(c, ret);
+ return ret;
+}
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
new file mode 100644
index 000000000000..852d30567da9
--- /dev/null
+++ b/fs/bcachefs/recovery.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_RECOVERY_H
+#define _BCACHEFS_RECOVERY_H
+
+extern const char * const bch2_recovery_passes[];
+
+/*
+ * For when we need to rewind recovery passes and run a pass we skipped:
+ */
+static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
+ bch2_recovery_passes[pass], pass,
+ bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
+
+ c->recovery_passes_explicit |= BIT_ULL(pass);
+
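+	/*
+	 * If recovery is already at or past this pass, rewind: completion bits
+	 * for this pass and everything after it are cleared, and recovery
+	 * restarts from here:
+	 */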
+ if (c->curr_recovery_pass >= pass) {
+ c->curr_recovery_pass = pass;
+		c->recovery_passes_complete &= (1ULL << pass) - 1;
+ return -BCH_ERR_restart_recovery;
+ } else {
+ return 0;
+ }
+}
+
+u64 bch2_fsck_recovery_passes(void);
+
+int bch2_fs_recovery(struct bch_fs *);
+int bch2_fs_initialize(struct bch_fs *);
+
+#endif /* _BCACHEFS_RECOVERY_H */
diff --git a/fs/bcachefs/recovery_types.h b/fs/bcachefs/recovery_types.h
new file mode 100644
index 000000000000..fbfa9d831d6f
--- /dev/null
+++ b/fs/bcachefs/recovery_types.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_RECOVERY_TYPES_H
+#define _BCACHEFS_RECOVERY_TYPES_H
+
+#define PASS_SILENT BIT(0)
+#define PASS_FSCK BIT(1)
+#define PASS_UNCLEAN BIT(2)
+#define PASS_ALWAYS BIT(3)
+
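+/*
+ * Passes run in the order listed here; the PASS_* flags above control when a
+ * pass runs and whether it logs a message. Illustrative expansion:
+ * x(check_inodes, PASS_FSCK) becomes BCH_RECOVERY_PASS_check_inodes in the
+ * enum below, "check_inodes" in bch2_recovery_passes[], and
+ * { .fn = bch2_check_inodes, .when = PASS_FSCK } in recovery_pass_fns[]:
+ */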
+#define BCH_RECOVERY_PASSES() \
+ x(alloc_read, PASS_ALWAYS) \
+ x(stripes_read, PASS_ALWAYS) \
+ x(initialize_subvolumes, 0) \
+ x(snapshots_read, PASS_ALWAYS) \
+ x(check_topology, 0) \
+ x(check_allocations, PASS_FSCK) \
+ x(set_may_go_rw, PASS_ALWAYS|PASS_SILENT) \
+ x(journal_replay, PASS_ALWAYS) \
+ x(check_alloc_info, PASS_FSCK) \
+ x(check_lrus, PASS_FSCK) \
+ x(check_btree_backpointers, PASS_FSCK) \
+ x(check_backpointers_to_extents,PASS_FSCK) \
+ x(check_extents_to_backpointers,PASS_FSCK) \
+ x(check_alloc_to_lru_refs, PASS_FSCK) \
+ x(fs_freespace_init, PASS_ALWAYS|PASS_SILENT) \
+ x(bucket_gens_init, 0) \
+ x(check_snapshot_trees, PASS_FSCK) \
+ x(check_snapshots, PASS_FSCK) \
+ x(check_subvols, PASS_FSCK) \
+ x(delete_dead_snapshots, PASS_FSCK|PASS_UNCLEAN) \
+ x(fs_upgrade_for_subvolumes, 0) \
+ x(resume_logged_ops, PASS_ALWAYS) \
+ x(check_inodes, PASS_FSCK) \
+ x(check_extents, PASS_FSCK) \
+ x(check_dirents, PASS_FSCK) \
+ x(check_xattrs, PASS_FSCK) \
+ x(check_root, PASS_FSCK) \
+ x(check_directory_structure, PASS_FSCK) \
+ x(check_nlinks, PASS_FSCK) \
+ x(delete_dead_inodes, PASS_FSCK|PASS_UNCLEAN) \
+ x(fix_reflink_p, 0) \
+
+enum bch_recovery_pass {
+#define x(n, when) BCH_RECOVERY_PASS_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
+#endif /* _BCACHEFS_RECOVERY_TYPES_H */
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
new file mode 100644
index 000000000000..d77d0ea9afff
--- /dev/null
+++ b/fs/bcachefs/reflink.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "extents.h"
+#include "inode.h"
+#include "io_misc.h"
+#include "io_write.h"
+#include "reflink.h"
+#include "subvolume.h"
+#include "super-io.h"
+
+#include <linux/sched/signal.h>
+
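+/*
+ * Map an extent key type to its indirect counterpart; returns 0 if the type
+ * can't be made indirect:
+ */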
+static inline unsigned bkey_type_to_indirect(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_extent:
+ return KEY_TYPE_reflink_v;
+ case KEY_TYPE_inline_data:
+ return KEY_TYPE_indirect_inline_data;
+ default:
+ return 0;
+ }
+}
+
+/* reflink pointers */
+
+int bch2_reflink_p_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
+
+ if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix &&
+ le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad)) {
+ prt_printf(err, "idx < front_pad (%llu < %u)",
+ le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
+
+ prt_printf(out, "idx %llu front_pad %u back_pad %u",
+ le64_to_cpu(p.v->idx),
+ le32_to_cpu(p.v->front_pad),
+ le32_to_cpu(p.v->back_pad));
+}
+
+bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
+{
+ struct bkey_s_reflink_p l = bkey_s_to_reflink_p(_l);
+ struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(_r);
+
+ /*
+ * Disabled for now, the triggers code needs to be reworked for merging
+ * of reflink pointers to work:
+ */
+ return false;
+
+ if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx))
+ return false;
+
+ bch2_key_resize(l.k, l.k->size + r.k->size);
+ return true;
+}
+
+/* indirect extents */
+
+int bch2_reflink_v_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ return bch2_bkey_ptrs_invalid(c, k, flags, err);
+}
+
+void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
+
+ prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount));
+
+ bch2_bkey_ptrs_to_text(out, c, k);
+}
+
+#if 0
+Currently disabled, needs to be debugged:
+
+bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
+{
+ struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l);
+ struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r);
+
+ return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
+}
+#endif
+
+int bch2_trans_mark_reflink_v(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
+ struct bkey_i_reflink_v *r = bkey_i_to_reflink_v(new);
+
+ if (!r->v.refcount) {
+ r->k.type = KEY_TYPE_deleted;
+ r->k.size = 0;
+ set_bkey_val_u64s(&r->k, 0);
+ return 0;
+ }
+ }
+
+ return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
+}
+
+/* indirect inline data */
+
+int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ return 0;
+}
+
+void bch2_indirect_inline_data_to_text(struct printbuf *out,
+ struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
+ unsigned datalen = bkey_inline_data_bytes(k.k);
+
+ prt_printf(out, "refcount %llu datalen %u: %*phN",
+ le64_to_cpu(d.v->refcount), datalen,
+ min(datalen, 32U), d.v->data);
+}
+
+int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new,
+ unsigned flags)
+{
+ if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
+ struct bkey_i_indirect_inline_data *r =
+ bkey_i_to_indirect_inline_data(new);
+
+ if (!r->v.refcount) {
+ r->k.type = KEY_TYPE_deleted;
+ r->k.size = 0;
+ set_bkey_val_u64s(&r->k, 0);
+ }
+ }
+
+ return 0;
+}
+
+static int bch2_make_extent_indirect(struct btree_trans *trans,
+ struct btree_iter *extent_iter,
+ struct bkey_i *orig)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter reflink_iter = { NULL };
+ struct bkey_s_c k;
+ struct bkey_i *r_v;
+ struct bkey_i_reflink_p *r_p;
+ __le64 *refcount;
+ int ret;
+
+ if (orig->k.type == KEY_TYPE_inline_data)
+ bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
+
+ bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
+ BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_prev(&reflink_iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
+ ret = PTR_ERR_OR_ZERO(r_v);
+ if (ret)
+ goto err;
+
+ bkey_init(&r_v->k);
+ r_v->k.type = bkey_type_to_indirect(&orig->k);
+ r_v->k.p = reflink_iter.pos;
+ bch2_key_resize(&r_v->k, orig->k.size);
+ r_v->k.version = orig->k.version;
+
+ set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
+
+ refcount = bkey_refcount(r_v);
+ *refcount = 0;
+ memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));
+
+ ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
+ if (ret)
+ goto err;
+
+ /*
+	 * orig is in a bkey_buf which statically allocates five u64s for the val,
+ * so we know it will be big enough:
+ */
+ orig->k.type = KEY_TYPE_reflink_p;
+ r_p = bkey_i_to_reflink_p(orig);
+ set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
+
+ /* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
+#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+ __underlying_memset(&r_p->v, 0, sizeof(r_p->v));
+#else
+ memset(&r_p->v, 0, sizeof(r_p->v));
+#endif
+
+ r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
+
+ ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+err:
+ bch2_trans_iter_exit(trans, &reflink_iter);
+
+ return ret;
+}
+
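+/*
+ * Find the next written data extent before @end to use as a remap source;
+ * returns a null bkey when the range is exhausted:
+ */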
+static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
+{
+ struct bkey_s_c k;
+ int ret;
+
+ for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
+ if (bkey_extent_is_unwritten(k))
+ continue;
+
+ if (bkey_extent_is_data(k.k))
+ return k;
+ }
+
+ if (bkey_ge(iter->pos, end))
+ bch2_btree_iter_set_pos(iter, end);
+ return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
+}
+
+s64 bch2_remap_range(struct bch_fs *c,
+ subvol_inum dst_inum, u64 dst_offset,
+ subvol_inum src_inum, u64 src_offset,
+ u64 remap_sectors,
+ u64 new_i_size, s64 *i_sectors_delta)
+{
+ struct btree_trans *trans;
+ struct btree_iter dst_iter, src_iter;
+ struct bkey_s_c src_k;
+ struct bkey_buf new_dst, new_src;
+ struct bpos dst_start = POS(dst_inum.inum, dst_offset);
+ struct bpos src_start = POS(src_inum.inum, src_offset);
+ struct bpos dst_end = dst_start, src_end = src_start;
+ struct bpos src_want;
+ u64 dst_done;
+ u32 dst_snapshot, src_snapshot;
+ int ret = 0, ret2 = 0;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink))
+ return -BCH_ERR_erofs_no_writes;
+
+ bch2_check_set_feature(c, BCH_FEATURE_reflink);
+
+ dst_end.offset += remap_sectors;
+ src_end.offset += remap_sectors;
+
+ bch2_bkey_buf_init(&new_dst);
+ bch2_bkey_buf_init(&new_src);
+ trans = bch2_trans_get(c);
+
+ bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
+ BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
+ BTREE_ITER_INTENT);
+
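+	/*
+	 * Each loop iteration is a single transaction: on transaction restart
+	 * we re-look-up snapshot IDs and resume from the current dst position:
+	 */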
+ while ((ret == 0 ||
+ bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
+ bkey_lt(dst_iter.pos, dst_end)) {
+ struct disk_reservation disk_res = { 0 };
+
+ bch2_trans_begin(trans);
+
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol,
+ &src_snapshot);
+ if (ret)
+ continue;
+
+ bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);
+
+ ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
+ &dst_snapshot);
+ if (ret)
+ continue;
+
+ bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
+
+ dst_done = dst_iter.pos.offset - dst_start.offset;
+ src_want = POS(src_start.inode, src_start.offset + dst_done);
+ bch2_btree_iter_set_pos(&src_iter, src_want);
+
+ src_k = get_next_src(&src_iter, src_end);
+ ret = bkey_err(src_k);
+ if (ret)
+ continue;
+
+ if (bkey_lt(src_want, src_iter.pos)) {
+ ret = bch2_fpunch_at(trans, &dst_iter, dst_inum,
+ min(dst_end.offset,
+ dst_iter.pos.offset +
+ src_iter.pos.offset - src_want.offset),
+ i_sectors_delta);
+ continue;
+ }
+
+ if (src_k.k->type != KEY_TYPE_reflink_p) {
+ bch2_btree_iter_set_pos_to_extent_start(&src_iter);
+
+ bch2_bkey_buf_reassemble(&new_src, c, src_k);
+ src_k = bkey_i_to_s_c(new_src.k);
+
+ ret = bch2_make_extent_indirect(trans, &src_iter,
+ new_src.k);
+ if (ret)
+ continue;
+
+ BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
+ }
+
+ if (src_k.k->type == KEY_TYPE_reflink_p) {
+ struct bkey_s_c_reflink_p src_p =
+ bkey_s_c_to_reflink_p(src_k);
+ struct bkey_i_reflink_p *dst_p =
+ bkey_reflink_p_init(new_dst.k);
+
+ u64 offset = le64_to_cpu(src_p.v->idx) +
+ (src_want.offset -
+ bkey_start_offset(src_k.k));
+
+ dst_p->v.idx = cpu_to_le64(offset);
+ } else {
+ BUG();
+ }
+
+ new_dst.k->k.p = dst_iter.pos;
+ bch2_key_resize(&new_dst.k->k,
+ min(src_k.k->p.offset - src_want.offset,
+ dst_end.offset - dst_iter.pos.offset));
+
+ ret = bch2_extent_update(trans, dst_inum, &dst_iter,
+ new_dst.k, &disk_res,
+ new_i_size, i_sectors_delta,
+ true);
+ bch2_disk_reservation_put(c, &disk_res);
+ }
+ bch2_trans_iter_exit(trans, &dst_iter);
+ bch2_trans_iter_exit(trans, &src_iter);
+
+ BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
+ BUG_ON(bkey_gt(dst_iter.pos, dst_end));
+
+ dst_done = dst_iter.pos.offset - dst_start.offset;
+ new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
+
+ do {
+ struct bch_inode_unpacked inode_u;
+ struct btree_iter inode_iter = { NULL };
+
+ bch2_trans_begin(trans);
+
+ ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
+ dst_inum, BTREE_ITER_INTENT);
+
+ if (!ret2 &&
+ inode_u.bi_size < new_i_size) {
+ inode_u.bi_size = new_i_size;
+ ret2 = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+ }
+
+ bch2_trans_iter_exit(trans, &inode_iter);
+ } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
+
+ bch2_trans_put(trans);
+ bch2_bkey_buf_exit(&new_src, c);
+ bch2_bkey_buf_exit(&new_dst, c);
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_reflink);
+
+ return dst_done ?: ret ?: ret2;
+}
diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h
new file mode 100644
index 000000000000..fe52538efb52
--- /dev/null
+++ b/fs/bcachefs/reflink.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REFLINK_H
+#define _BCACHEFS_REFLINK_H
+
+enum bkey_invalid_flags;
+
+int bch2_reflink_p_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
+
+#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
+ .key_invalid = bch2_reflink_p_invalid, \
+ .val_to_text = bch2_reflink_p_to_text, \
+ .key_merge = bch2_reflink_p_merge, \
+ .trans_trigger = bch2_trans_mark_reflink_p, \
+ .atomic_trigger = bch2_mark_reflink_p, \
+ .min_val_size = 16, \
+})
+
+int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
+ struct bkey_s_c);
+int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_i *, unsigned);
+
+#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
+ .key_invalid = bch2_reflink_v_invalid, \
+ .val_to_text = bch2_reflink_v_to_text, \
+ .swab = bch2_ptr_swab, \
+ .trans_trigger = bch2_trans_mark_reflink_v, \
+ .atomic_trigger = bch2_mark_extent, \
+ .min_val_size = 8, \
+})
+
+int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_indirect_inline_data_to_text(struct printbuf *,
+ struct bch_fs *, struct bkey_s_c);
+int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
+ enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_i *,
+ unsigned);
+
+#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
+ .key_invalid = bch2_indirect_inline_data_invalid, \
+ .val_to_text = bch2_indirect_inline_data_to_text, \
+ .trans_trigger = bch2_trans_mark_indirect_inline_data, \
+ .min_val_size = 8, \
+})
+
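+/*
+ * Both indirect extent value types store a refcount as the first field of the
+ * val; these helpers return it, or NULL for key types with no refcount:
+ */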
+static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)
+{
+ switch (k.k->type) {
+ case KEY_TYPE_reflink_v:
+ return &bkey_s_c_to_reflink_v(k).v->refcount;
+ case KEY_TYPE_indirect_inline_data:
+ return &bkey_s_c_to_indirect_inline_data(k).v->refcount;
+ default:
+ return NULL;
+ }
+}
+
+static inline __le64 *bkey_refcount(struct bkey_i *k)
+{
+ switch (k->k.type) {
+ case KEY_TYPE_reflink_v:
+ return &bkey_i_to_reflink_v(k)->v.refcount;
+ case KEY_TYPE_indirect_inline_data:
+ return &bkey_i_to_indirect_inline_data(k)->v.refcount;
+ default:
+ return NULL;
+ }
+}
+
+s64 bch2_remap_range(struct bch_fs *, subvol_inum, u64,
+ subvol_inum, u64, u64, u64, s64 *);
+
+#endif /* _BCACHEFS_REFLINK_H */
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
new file mode 100644
index 000000000000..cef2a0447b86
--- /dev/null
+++ b/fs/bcachefs/replicas.c
@@ -0,0 +1,1058 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "buckets.h"
+#include "journal.h"
+#include "replicas.h"
+#include "super-io.h"
+
+static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
+ struct bch_replicas_cpu *);
+
+/* Replicas tracking - in memory: */
+
+static void verify_replicas_entry(struct bch_replicas_entry *e)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ unsigned i;
+
+ BUG_ON(e->data_type >= BCH_DATA_NR);
+ BUG_ON(!e->nr_devs);
+ BUG_ON(e->nr_required > 1 &&
+ e->nr_required >= e->nr_devs);
+
+ for (i = 0; i + 1 < e->nr_devs; i++)
+ BUG_ON(e->devs[i] >= e->devs[i + 1]);
+#endif
+}
+
+void bch2_replicas_entry_sort(struct bch_replicas_entry *e)
+{
+ bubble_sort(e->devs, e->nr_devs, u8_cmp);
+}
+
+static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
+{
+ eytzinger0_sort(r->entries, r->nr, r->entry_size, memcmp, NULL);
+}
+
+static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
+ struct bch_replicas_entry_v0 *e)
+{
+ unsigned i;
+
+ if (e->data_type < BCH_DATA_NR)
+ prt_printf(out, "%s", bch2_data_types[e->data_type]);
+ else
+ prt_printf(out, "(invalid data type %u)", e->data_type);
+
+ prt_printf(out, ": %u [", e->nr_devs);
+ for (i = 0; i < e->nr_devs; i++)
+ prt_printf(out, i ? " %u" : "%u", e->devs[i]);
+ prt_printf(out, "]");
+}
+
+void bch2_replicas_entry_to_text(struct printbuf *out,
+ struct bch_replicas_entry *e)
+{
+ unsigned i;
+
+ if (e->data_type < BCH_DATA_NR)
+ prt_printf(out, "%s", bch2_data_types[e->data_type]);
+ else
+ prt_printf(out, "(invalid data type %u)", e->data_type);
+
+ prt_printf(out, ": %u/%u [", e->nr_required, e->nr_devs);
+ for (i = 0; i < e->nr_devs; i++)
+ prt_printf(out, i ? " %u" : "%u", e->devs[i]);
+ prt_printf(out, "]");
+}
+
+void bch2_cpu_replicas_to_text(struct printbuf *out,
+ struct bch_replicas_cpu *r)
+{
+ struct bch_replicas_entry *e;
+ bool first = true;
+
+ for_each_cpu_replicas_entry(r, e) {
+ if (!first)
+ prt_printf(out, " ");
+ first = false;
+
+ bch2_replicas_entry_to_text(out, e);
+ }
+}
+
+static void extent_to_replicas(struct bkey_s_c k,
+ struct bch_replicas_entry *r)
+{
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+
+ r->nr_required = 1;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ if (p.ptr.cached)
+ continue;
+
+ if (!p.has_ec)
+ r->devs[r->nr_devs++] = p.ptr.dev;
+ else
+ r->nr_required = 0;
+ }
+}
+
+static void stripe_to_replicas(struct bkey_s_c k,
+ struct bch_replicas_entry *r)
+{
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+ const struct bch_extent_ptr *ptr;
+
+ r->nr_required = s.v->nr_blocks - s.v->nr_redundant;
+
+ for (ptr = s.v->ptrs;
+ ptr < s.v->ptrs + s.v->nr_blocks;
+ ptr++)
+ r->devs[r->nr_devs++] = ptr->dev;
+}
+
+void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
+ struct bkey_s_c k)
+{
+ e->nr_devs = 0;
+
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ e->data_type = BCH_DATA_btree;
+ extent_to_replicas(k, e);
+ break;
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ e->data_type = BCH_DATA_user;
+ extent_to_replicas(k, e);
+ break;
+ case KEY_TYPE_stripe:
+ e->data_type = BCH_DATA_parity;
+ stripe_to_replicas(k, e);
+ break;
+ }
+
+ bch2_replicas_entry_sort(e);
+}
+
+void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
+ enum bch_data_type data_type,
+ struct bch_devs_list devs)
+{
+ unsigned i;
+
+ BUG_ON(!data_type ||
+ data_type == BCH_DATA_sb ||
+ data_type >= BCH_DATA_NR);
+
+ e->data_type = data_type;
+ e->nr_devs = 0;
+ e->nr_required = 1;
+
+ for (i = 0; i < devs.nr; i++)
+ e->devs[e->nr_devs++] = devs.devs[i];
+
+ bch2_replicas_entry_sort(e);
+}
+
+static struct bch_replicas_cpu
+cpu_replicas_add_entry(struct bch_replicas_cpu *old,
+ struct bch_replicas_entry *new_entry)
+{
+ unsigned i;
+ struct bch_replicas_cpu new = {
+ .nr = old->nr + 1,
+ .entry_size = max_t(unsigned, old->entry_size,
+ replicas_entry_bytes(new_entry)),
+ };
+
+ BUG_ON(!new_entry->data_type);
+ verify_replicas_entry(new_entry);
+
+ new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
+ if (!new.entries)
+ return new;
+
+ for (i = 0; i < old->nr; i++)
+ memcpy(cpu_replicas_entry(&new, i),
+ cpu_replicas_entry(old, i),
+ old->entry_size);
+
+ memcpy(cpu_replicas_entry(&new, old->nr),
+ new_entry,
+ replicas_entry_bytes(new_entry));
+
+ bch2_cpu_replicas_sort(&new);
+ return new;
+}
+
+static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
+ struct bch_replicas_entry *search)
+{
+ int idx, entry_size = replicas_entry_bytes(search);
+
+ if (unlikely(entry_size > r->entry_size))
+ return -1;
+
+ verify_replicas_entry(search);
+
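+	/*
+	 * entry_cmp shadows memcmp()'s size argument: comparisons use the
+	 * search key's entry_size, since table entries may be padded out to a
+	 * larger common entry_size:
+	 */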
+#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
+ idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
+ entry_cmp, search);
+#undef entry_cmp
+
+ return idx < r->nr ? idx : -1;
+}
+
+int bch2_replicas_entry_idx(struct bch_fs *c,
+ struct bch_replicas_entry *search)
+{
+ bch2_replicas_entry_sort(search);
+
+ return __replicas_entry_idx(&c->replicas, search);
+}
+
+static bool __replicas_has_entry(struct bch_replicas_cpu *r,
+ struct bch_replicas_entry *search)
+{
+ return __replicas_entry_idx(r, search) >= 0;
+}
+
+bool bch2_replicas_marked(struct bch_fs *c,
+ struct bch_replicas_entry *search)
+{
+ bool marked;
+
+ if (!search->nr_devs)
+ return true;
+
+ verify_replicas_entry(search);
+
+ percpu_down_read(&c->mark_lock);
+ marked = __replicas_has_entry(&c->replicas, search) &&
+		 (likely(!c->replicas_gc.entries) ||
+ __replicas_has_entry(&c->replicas_gc, search));
+ percpu_up_read(&c->mark_lock);
+
+ return marked;
+}
+
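+/*
+ * Copy usage counters from the old replicas table layout into the new one,
+ * matching entries by content, since entry indices change when the table is
+ * resized:
+ */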
+static void __replicas_table_update(struct bch_fs_usage *dst,
+ struct bch_replicas_cpu *dst_r,
+ struct bch_fs_usage *src,
+ struct bch_replicas_cpu *src_r)
+{
+ int src_idx, dst_idx;
+
+ *dst = *src;
+
+ for (src_idx = 0; src_idx < src_r->nr; src_idx++) {
+ if (!src->replicas[src_idx])
+ continue;
+
+ dst_idx = __replicas_entry_idx(dst_r,
+ cpu_replicas_entry(src_r, src_idx));
+ BUG_ON(dst_idx < 0);
+
+ dst->replicas[dst_idx] = src->replicas[src_idx];
+ }
+}
+
+static void __replicas_table_update_pcpu(struct bch_fs_usage __percpu *dst_p,
+ struct bch_replicas_cpu *dst_r,
+ struct bch_fs_usage __percpu *src_p,
+ struct bch_replicas_cpu *src_r)
+{
+ unsigned src_nr = sizeof(struct bch_fs_usage) / sizeof(u64) + src_r->nr;
+ struct bch_fs_usage *dst, *src = (void *)
+ bch2_acc_percpu_u64s((u64 __percpu *) src_p, src_nr);
+
+ preempt_disable();
+ dst = this_cpu_ptr(dst_p);
+ preempt_enable();
+
+ __replicas_table_update(dst, dst_r, src, src_r);
+}
+
+/*
+ * Resize filesystem accounting:
+ */
+static int replicas_table_update(struct bch_fs *c,
+ struct bch_replicas_cpu *new_r)
+{
+ struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
+ struct bch_fs_usage_online *new_scratch = NULL;
+ struct bch_fs_usage __percpu *new_gc = NULL;
+ struct bch_fs_usage *new_base = NULL;
+ unsigned i, bytes = sizeof(struct bch_fs_usage) +
+ sizeof(u64) * new_r->nr;
+ unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
+ sizeof(u64) * new_r->nr;
+ int ret = 0;
+
+ memset(new_usage, 0, sizeof(new_usage));
+
+ for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+ if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
+ sizeof(u64), GFP_KERNEL)))
+ goto err;
+
+ if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
+ !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
+ (c->usage_gc &&
+ !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+ if (c->usage[i])
+ __replicas_table_update_pcpu(new_usage[i], new_r,
+ c->usage[i], &c->replicas);
+ if (c->usage_base)
+ __replicas_table_update(new_base, new_r,
+ c->usage_base, &c->replicas);
+ if (c->usage_gc)
+ __replicas_table_update_pcpu(new_gc, new_r,
+ c->usage_gc, &c->replicas);
+
+ for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+ swap(c->usage[i], new_usage[i]);
+ swap(c->usage_base, new_base);
+ swap(c->usage_scratch, new_scratch);
+ swap(c->usage_gc, new_gc);
+ swap(c->replicas, *new_r);
+out:
+ free_percpu(new_gc);
+ kfree(new_scratch);
+ for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+ free_percpu(new_usage[i]);
+ kfree(new_base);
+ return ret;
+err:
+ bch_err(c, "error updating replicas table: memory allocation failure");
+ ret = -BCH_ERR_ENOMEM_replicas_table;
+ goto out;
+}
+
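+/*
+ * Upper bound, in u64s, on the journal reservation needed to persist usage
+ * counters for every replicas entry in a single journal entry:
+ */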
+static unsigned reserve_journal_replicas(struct bch_fs *c,
+ struct bch_replicas_cpu *r)
+{
+ struct bch_replicas_entry *e;
+ unsigned journal_res_u64s = 0;
+
+ /* nr_inodes: */
+ journal_res_u64s +=
+ DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
+
+ /* key_version: */
+ journal_res_u64s +=
+ DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
+
+ /* persistent_reserved: */
+ journal_res_u64s +=
+ DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
+ BCH_REPLICAS_MAX;
+
+ for_each_cpu_replicas_entry(r, e)
+ journal_res_u64s +=
+ DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
+ e->nr_devs, sizeof(u64));
+ return journal_res_u64s;
+}
+
+noinline
+static int bch2_mark_replicas_slowpath(struct bch_fs *c,
+ struct bch_replicas_entry *new_entry)
+{
+ struct bch_replicas_cpu new_r, new_gc;
+ int ret = 0;
+
+ verify_replicas_entry(new_entry);
+
+ memset(&new_r, 0, sizeof(new_r));
+ memset(&new_gc, 0, sizeof(new_gc));
+
+ mutex_lock(&c->sb_lock);
+
+ if (c->replicas_gc.entries &&
+ !__replicas_has_entry(&c->replicas_gc, new_entry)) {
+ new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
+ if (!new_gc.entries) {
+ ret = -BCH_ERR_ENOMEM_cpu_replicas;
+ goto err;
+ }
+ }
+
+ if (!__replicas_has_entry(&c->replicas, new_entry)) {
+ new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
+ if (!new_r.entries) {
+ ret = -BCH_ERR_ENOMEM_cpu_replicas;
+ goto err;
+ }
+
+ ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
+ if (ret)
+ goto err;
+
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->replicas_journal_res,
+ reserve_journal_replicas(c, &new_r));
+ }
+
+ if (!new_r.entries &&
+ !new_gc.entries)
+ goto out;
+
+ /* allocations done, now commit: */
+
+ if (new_r.entries)
+ bch2_write_super(c);
+
+ /* don't update in memory replicas until changes are persistent */
+ percpu_down_write(&c->mark_lock);
+ if (new_r.entries)
+ ret = replicas_table_update(c, &new_r);
+ if (new_gc.entries)
+ swap(new_gc, c->replicas_gc);
+ percpu_up_write(&c->mark_lock);
+out:
+ mutex_unlock(&c->sb_lock);
+
+ kfree(new_r.entries);
+ kfree(new_gc.entries);
+
+ return ret;
+err:
+ bch_err_msg(c, ret, "adding replicas entry");
+ goto out;
+}
+
+int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry *r)
+{
+ return likely(bch2_replicas_marked(c, r))
+ ? 0 : bch2_mark_replicas_slowpath(c, r);
+}
+
+/* replicas delta list: */
+
+int bch2_replicas_delta_list_mark(struct bch_fs *c,
+ struct replicas_delta_list *r)
+{
+ struct replicas_delta *d = r->d;
+ struct replicas_delta *top = (void *) r->d + r->used;
+ int ret = 0;
+
+ for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
+ ret = bch2_mark_replicas(c, &d->r);
+ return ret;
+}
+
+/*
+ * Old replicas_gc mechanism: only used for journal replicas entries now, should
+ * die at some point:
+ */
+
+int bch2_replicas_gc_end(struct bch_fs *c, int ret)
+{
+ lockdep_assert_held(&c->replicas_gc_lock);
+
+ if (ret)
+ goto err;
+
+ mutex_lock(&c->sb_lock);
+ percpu_down_write(&c->mark_lock);
+
+ ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
+ if (ret)
+ goto err;
+
+ ret = replicas_table_update(c, &c->replicas_gc);
+err:
+ kfree(c->replicas_gc.entries);
+ c->replicas_gc.entries = NULL;
+
+ percpu_up_write(&c->mark_lock);
+
+ if (!ret)
+ bch2_write_super(c);
+
+ mutex_unlock(&c->sb_lock);
+
+ return ret;
+}
+
+int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
+{
+ struct bch_replicas_entry *e;
+ unsigned i = 0;
+
+ lockdep_assert_held(&c->replicas_gc_lock);
+
+ mutex_lock(&c->sb_lock);
+ BUG_ON(c->replicas_gc.entries);
+
+ c->replicas_gc.nr = 0;
+ c->replicas_gc.entry_size = 0;
+
+ for_each_cpu_replicas_entry(&c->replicas, e)
+ if (!((1 << e->data_type) & typemask)) {
+ c->replicas_gc.nr++;
+ c->replicas_gc.entry_size =
+ max_t(unsigned, c->replicas_gc.entry_size,
+ replicas_entry_bytes(e));
+ }
+
+ c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
+ c->replicas_gc.entry_size,
+ GFP_KERNEL);
+ if (!c->replicas_gc.entries) {
+ mutex_unlock(&c->sb_lock);
+ bch_err(c, "error allocating c->replicas_gc");
+ return -BCH_ERR_ENOMEM_replicas_gc;
+ }
+
+ for_each_cpu_replicas_entry(&c->replicas, e)
+ if (!((1 << e->data_type) & typemask))
+ memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
+ e, c->replicas_gc.entry_size);
+
+ bch2_cpu_replicas_sort(&c->replicas_gc);
+ mutex_unlock(&c->sb_lock);
+
+ return 0;
+}
+
+/*
+ * New much simpler mechanism for clearing out unneeded replicas entries - drop
+ * replicas entries that have 0 sectors used.
+ *
+ * However, we don't track sector counts for journal usage, so this doesn't drop
+ * any BCH_DATA_journal entries; the old bch2_replicas_gc_(start|end) mechanism
+ * is retained for that.
+ */
+int bch2_replicas_gc2(struct bch_fs *c)
+{
+ struct bch_replicas_cpu new = { 0 };
+ unsigned i, nr;
+ int ret = 0;
+
+ bch2_journal_meta(&c->journal);
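+	/*
+	 * Allocate the new table outside the locks, then recheck under
+	 * mark_lock and retry if the replicas table was resized concurrently:
+	 */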
+retry:
+ nr = READ_ONCE(c->replicas.nr);
+ new.entry_size = READ_ONCE(c->replicas.entry_size);
+ new.entries = kcalloc(nr, new.entry_size, GFP_KERNEL);
+ if (!new.entries) {
+ bch_err(c, "error allocating c->replicas_gc");
+ return -BCH_ERR_ENOMEM_replicas_gc;
+ }
+
+ mutex_lock(&c->sb_lock);
+ percpu_down_write(&c->mark_lock);
+
+ if (nr != c->replicas.nr ||
+ new.entry_size != c->replicas.entry_size) {
+ percpu_up_write(&c->mark_lock);
+ mutex_unlock(&c->sb_lock);
+ kfree(new.entries);
+ goto retry;
+ }
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+
+ if (e->data_type == BCH_DATA_journal ||
+ c->usage_base->replicas[i] ||
+ percpu_u64_get(&c->usage[0]->replicas[i]) ||
+ percpu_u64_get(&c->usage[1]->replicas[i]) ||
+ percpu_u64_get(&c->usage[2]->replicas[i]) ||
+ percpu_u64_get(&c->usage[3]->replicas[i]))
+ memcpy(cpu_replicas_entry(&new, new.nr++),
+ e, new.entry_size);
+ }
+
+ bch2_cpu_replicas_sort(&new);
+
+ ret = bch2_cpu_replicas_to_sb_replicas(c, &new);
+ if (ret)
+ goto err;
+
+ ret = replicas_table_update(c, &new);
+err:
+ kfree(new.entries);
+
+ percpu_up_write(&c->mark_lock);
+
+ if (!ret)
+ bch2_write_super(c);
+
+ mutex_unlock(&c->sb_lock);
+
+ return ret;
+}
+
+int bch2_replicas_set_usage(struct bch_fs *c,
+ struct bch_replicas_entry *r,
+ u64 sectors)
+{
+ int ret, idx = bch2_replicas_entry_idx(c, r);
+
+ if (idx < 0) {
+ struct bch_replicas_cpu n;
+
+ n = cpu_replicas_add_entry(&c->replicas, r);
+ if (!n.entries)
+ return -BCH_ERR_ENOMEM_cpu_replicas;
+
+ ret = replicas_table_update(c, &n);
+ if (ret)
+ return ret;
+
+ kfree(n.entries);
+
+ idx = bch2_replicas_entry_idx(c, r);
+		BUG_ON(idx < 0);
+ }
+
+ c->usage_base->replicas[idx] = sectors;
+
+ return 0;
+}
+
+/* Replicas tracking - superblock: */
+
+static int
+__bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
+ struct bch_replicas_cpu *cpu_r)
+{
+ struct bch_replicas_entry *e, *dst;
+ unsigned nr = 0, entry_size = 0, idx = 0;
+
+ for_each_replicas_entry(sb_r, e) {
+ entry_size = max_t(unsigned, entry_size,
+ replicas_entry_bytes(e));
+ nr++;
+ }
+
+ cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
+ if (!cpu_r->entries)
+ return -BCH_ERR_ENOMEM_cpu_replicas;
+
+ cpu_r->nr = nr;
+ cpu_r->entry_size = entry_size;
+
+ for_each_replicas_entry(sb_r, e) {
+ dst = cpu_replicas_entry(cpu_r, idx++);
+ memcpy(dst, e, replicas_entry_bytes(e));
+ bch2_replicas_entry_sort(dst);
+ }
+
+ return 0;
+}
+
+static int
+__bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
+ struct bch_replicas_cpu *cpu_r)
+{
+ struct bch_replicas_entry_v0 *e;
+ unsigned nr = 0, entry_size = 0, idx = 0;
+
+ for_each_replicas_entry(sb_r, e) {
+ entry_size = max_t(unsigned, entry_size,
+ replicas_entry_bytes(e));
+ nr++;
+ }
+
+ entry_size += sizeof(struct bch_replicas_entry) -
+ sizeof(struct bch_replicas_entry_v0);
+
+ cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
+ if (!cpu_r->entries)
+ return -BCH_ERR_ENOMEM_cpu_replicas;
+
+ cpu_r->nr = nr;
+ cpu_r->entry_size = entry_size;
+
+ for_each_replicas_entry(sb_r, e) {
+ struct bch_replicas_entry *dst =
+ cpu_replicas_entry(cpu_r, idx++);
+
+ dst->data_type = e->data_type;
+ dst->nr_devs = e->nr_devs;
+ dst->nr_required = 1;
+ memcpy(dst->devs, e->devs, e->nr_devs);
+ bch2_replicas_entry_sort(dst);
+ }
+
+ return 0;
+}
+
+int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
+{
+ struct bch_sb_field_replicas *sb_v1;
+ struct bch_sb_field_replicas_v0 *sb_v0;
+ struct bch_replicas_cpu new_r = { 0, 0, NULL };
+ int ret = 0;
+
+ if ((sb_v1 = bch2_sb_field_get(c->disk_sb.sb, replicas)))
+ ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
+ else if ((sb_v0 = bch2_sb_field_get(c->disk_sb.sb, replicas_v0)))
+ ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
+ if (ret)
+ return ret;
+
+ bch2_cpu_replicas_sort(&new_r);
+
+ percpu_down_write(&c->mark_lock);
+
+ ret = replicas_table_update(c, &new_r);
+ percpu_up_write(&c->mark_lock);
+
+ kfree(new_r.entries);
+
+	return ret;
+}
+
+static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
+ struct bch_replicas_cpu *r)
+{
+ struct bch_sb_field_replicas_v0 *sb_r;
+ struct bch_replicas_entry_v0 *dst;
+ struct bch_replicas_entry *src;
+ size_t bytes;
+
+ bytes = sizeof(struct bch_sb_field_replicas);
+
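+	/* - 1: v0 entries are one byte smaller, lacking the nr_required field */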
+ for_each_cpu_replicas_entry(r, src)
+ bytes += replicas_entry_bytes(src) - 1;
+
+ sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
+ DIV_ROUND_UP(bytes, sizeof(u64)));
+ if (!sb_r)
+ return -BCH_ERR_ENOSPC_sb_replicas;
+
+ bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
+ sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);
+
+ memset(&sb_r->entries, 0,
+ vstruct_end(&sb_r->field) -
+ (void *) &sb_r->entries);
+
+ dst = sb_r->entries;
+ for_each_cpu_replicas_entry(r, src) {
+ dst->data_type = src->data_type;
+ dst->nr_devs = src->nr_devs;
+ memcpy(dst->devs, src->devs, src->nr_devs);
+
+ dst = replicas_entry_next(dst);
+
+ BUG_ON((void *) dst > vstruct_end(&sb_r->field));
+ }
+
+ return 0;
+}
+
+static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
+ struct bch_replicas_cpu *r)
+{
+ struct bch_sb_field_replicas *sb_r;
+ struct bch_replicas_entry *dst, *src;
+ bool need_v1 = false;
+ size_t bytes;
+
+ bytes = sizeof(struct bch_sb_field_replicas);
+
+ for_each_cpu_replicas_entry(r, src) {
+ bytes += replicas_entry_bytes(src);
+ if (src->nr_required != 1)
+ need_v1 = true;
+ }
+
+ if (!need_v1)
+ return bch2_cpu_replicas_to_sb_replicas_v0(c, r);
+
+ sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
+ DIV_ROUND_UP(bytes, sizeof(u64)));
+ if (!sb_r)
+ return -BCH_ERR_ENOSPC_sb_replicas;
+
+ bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
+ sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);
+
+ memset(&sb_r->entries, 0,
+ vstruct_end(&sb_r->field) -
+ (void *) &sb_r->entries);
+
+ dst = sb_r->entries;
+ for_each_cpu_replicas_entry(r, src) {
+ memcpy(dst, src, replicas_entry_bytes(src));
+
+ dst = replicas_entry_next(dst);
+
+ BUG_ON((void *) dst > vstruct_end(&sb_r->field));
+ }
+
+ return 0;
+}
+
+static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
+ struct bch_sb *sb,
+ struct printbuf *err)
+{
+ unsigned i, j;
+
+ sort_cmp_size(cpu_r->entries,
+ cpu_r->nr,
+ cpu_r->entry_size,
+ memcmp, NULL);
+
+ for (i = 0; i < cpu_r->nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(cpu_r, i);
+
+ if (e->data_type >= BCH_DATA_NR) {
+ prt_printf(err, "invalid data type in entry ");
+ bch2_replicas_entry_to_text(err, e);
+ return -BCH_ERR_invalid_sb_replicas;
+ }
+
+ if (!e->nr_devs) {
+ prt_printf(err, "no devices in entry ");
+ bch2_replicas_entry_to_text(err, e);
+ return -BCH_ERR_invalid_sb_replicas;
+ }
+
+ if (e->nr_required > 1 &&
+ e->nr_required >= e->nr_devs) {
+ prt_printf(err, "bad nr_required in entry ");
+ bch2_replicas_entry_to_text(err, e);
+ return -BCH_ERR_invalid_sb_replicas;
+ }
+
+ for (j = 0; j < e->nr_devs; j++)
+ if (!bch2_dev_exists(sb, e->devs[j])) {
+ prt_printf(err, "invalid device %u in entry ", e->devs[j]);
+ bch2_replicas_entry_to_text(err, e);
+ return -BCH_ERR_invalid_sb_replicas;
+ }
+
+ if (i + 1 < cpu_r->nr) {
+ struct bch_replicas_entry *n =
+ cpu_replicas_entry(cpu_r, i + 1);
+
+ BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);
+
+ if (!memcmp(e, n, cpu_r->entry_size)) {
+ prt_printf(err, "duplicate replicas entry ");
+ bch2_replicas_entry_to_text(err, e);
+ return -BCH_ERR_invalid_sb_replicas;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
+ struct bch_replicas_cpu cpu_r;
+ int ret;
+
+ ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
+ if (ret)
+ return ret;
+
+ ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
+ kfree(cpu_r.entries);
+ return ret;
+}
+
+static void bch2_sb_replicas_to_text(struct printbuf *out,
+ struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_replicas *r = field_to_type(f, replicas);
+ struct bch_replicas_entry *e;
+ bool first = true;
+
+ for_each_replicas_entry(r, e) {
+ if (!first)
+ prt_printf(out, " ");
+ first = false;
+
+ bch2_replicas_entry_to_text(out, e);
+ }
+ prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
+ .validate = bch2_sb_replicas_validate,
+ .to_text = bch2_sb_replicas_to_text,
+};
+
+static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
+ struct bch_replicas_cpu cpu_r;
+ int ret;
+
+ ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
+ if (ret)
+ return ret;
+
+ ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
+ kfree(cpu_r.entries);
+ return ret;
+}
+
+static void bch2_sb_replicas_v0_to_text(struct printbuf *out,
+ struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
+ struct bch_replicas_entry_v0 *e;
+ bool first = true;
+
+ for_each_replicas_entry(sb_r, e) {
+ if (!first)
+ prt_printf(out, " ");
+ first = false;
+
+ bch2_replicas_entry_v0_to_text(out, e);
+ }
+ prt_newline(out);
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0 = {
+ .validate = bch2_sb_replicas_v0_validate,
+ .to_text = bch2_sb_replicas_v0_to_text,
+};
+
+/* Query replicas: */
+
+bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
+ unsigned flags, bool print)
+{
+ struct bch_replicas_entry *e;
+ bool ret = true;
+
+ percpu_down_read(&c->mark_lock);
+ for_each_cpu_replicas_entry(&c->replicas, e) {
+ unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
+ bool metadata = e->data_type < BCH_DATA_user;
+
+ if (e->data_type == BCH_DATA_cached)
+ continue;
+
+ for (i = 0; i < e->nr_devs; i++) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);
+
+ nr_online += test_bit(e->devs[i], devs.d);
+ nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
+ }
+
+ if (nr_failed == e->nr_devs)
+ continue;
+
+ if (nr_online < e->nr_required)
+ dflags |= metadata
+ ? BCH_FORCE_IF_METADATA_LOST
+ : BCH_FORCE_IF_DATA_LOST;
+
+ if (nr_online < e->nr_devs)
+ dflags |= metadata
+ ? BCH_FORCE_IF_METADATA_DEGRADED
+ : BCH_FORCE_IF_DATA_DEGRADED;
+
+ if (dflags & ~flags) {
+ if (print) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_replicas_entry_to_text(&buf, e);
+ bch_err(c, "insufficient devices online (%u) for replicas entry %s",
+ nr_online, buf.buf);
+ printbuf_exit(&buf);
+ }
+ ret = false;
+ break;
+ }
+
+ }
+ percpu_up_read(&c->mark_lock);
+
+ return ret;
+}
+
+unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
+{
+ struct bch_sb_field_replicas *replicas;
+ struct bch_sb_field_replicas_v0 *replicas_v0;
+ unsigned i, data_has = 0;
+
+ replicas = bch2_sb_field_get(sb, replicas);
+ replicas_v0 = bch2_sb_field_get(sb, replicas_v0);
+
+ if (replicas) {
+ struct bch_replicas_entry *r;
+
+ for_each_replicas_entry(replicas, r)
+ for (i = 0; i < r->nr_devs; i++)
+ if (r->devs[i] == dev)
+ data_has |= 1 << r->data_type;
+ } else if (replicas_v0) {
+ struct bch_replicas_entry_v0 *r;
+
+ for_each_replicas_entry_v0(replicas_v0, r)
+ for (i = 0; i < r->nr_devs; i++)
+ if (r->devs[i] == dev)
+ data_has |= 1 << r->data_type;
+ }
+
+ return data_has;
+}
+
+unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
+{
+ unsigned ret;
+
+ mutex_lock(&c->sb_lock);
+ ret = bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
+ mutex_unlock(&c->sb_lock);
+
+ return ret;
+}
+
+void bch2_fs_replicas_exit(struct bch_fs *c)
+{
+ unsigned i;
+
+ kfree(c->usage_scratch);
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ free_percpu(c->usage[i]);
+ kfree(c->usage_base);
+ kfree(c->replicas.entries);
+ kfree(c->replicas_gc.entries);
+
+ mempool_exit(&c->replicas_delta_pool);
+}
+
+int bch2_fs_replicas_init(struct bch_fs *c)
+{
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->replicas_journal_res,
+ reserve_journal_replicas(c, &c->replicas));
+
+ return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
+ REPLICAS_DELTA_LIST_MAX) ?:
+ replicas_table_update(c, &c->replicas);
+}
diff --git a/fs/bcachefs/replicas.h b/fs/bcachefs/replicas.h
new file mode 100644
index 000000000000..4887675a86f0
--- /dev/null
+++ b/fs/bcachefs/replicas.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REPLICAS_H
+#define _BCACHEFS_REPLICAS_H
+
+#include "bkey.h"
+#include "eytzinger.h"
+#include "replicas_types.h"
+
+void bch2_replicas_entry_sort(struct bch_replicas_entry *);
+void bch2_replicas_entry_to_text(struct printbuf *,
+ struct bch_replicas_entry *);
+void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *);
+
+static inline struct bch_replicas_entry *
+cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
+{
+ return (void *) r->entries + r->entry_size * i;
+}
+
+int bch2_replicas_entry_idx(struct bch_fs *,
+ struct bch_replicas_entry *);
+
+void bch2_devlist_to_replicas(struct bch_replicas_entry *,
+ enum bch_data_type,
+ struct bch_devs_list);
+bool bch2_replicas_marked(struct bch_fs *, struct bch_replicas_entry *);
+int bch2_mark_replicas(struct bch_fs *,
+ struct bch_replicas_entry *);
+
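+/*
+ * Step to the next delta in a replicas_delta_list: the entry bytes plus the
+ * 8 byte delta field that precedes each entry.
+ */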
+static inline struct replicas_delta *
+replicas_delta_next(struct replicas_delta *d)
+{
+ return (void *) d + replicas_entry_bytes(&d->r) + 8;
+}
+
+int bch2_replicas_delta_list_mark(struct bch_fs *, struct replicas_delta_list *);
+
+void bch2_bkey_to_replicas(struct bch_replicas_entry *, struct bkey_s_c);
+
+static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,
+ unsigned dev)
+{
+ e->data_type = BCH_DATA_cached;
+ e->nr_devs = 1;
+ e->nr_required = 1;
+ e->devs[0] = dev;
+}
+
+bool bch2_have_enough_devs(struct bch_fs *, struct bch_devs_mask,
+ unsigned, bool);
+
+unsigned bch2_sb_dev_has_data(struct bch_sb *, unsigned);
+unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
+
+int bch2_replicas_gc_end(struct bch_fs *, int);
+int bch2_replicas_gc_start(struct bch_fs *, unsigned);
+int bch2_replicas_gc2(struct bch_fs *);
+
+int bch2_replicas_set_usage(struct bch_fs *,
+ struct bch_replicas_entry *,
+ u64);
+
+#define for_each_cpu_replicas_entry(_r, _i) \
+ for (_i = (_r)->entries; \
+ (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
+ _i = (void *) (_i) + (_r)->entry_size)
+
+/* iterate over superblock replicas - used by userspace tools: */
+
+#define replicas_entry_next(_i) \
+ ((typeof(_i)) ((void *) (_i) + replicas_entry_bytes(_i)))
+
+#define for_each_replicas_entry(_r, _i) \
+ for (_i = (_r)->entries; \
+ (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
+ (_i) = replicas_entry_next(_i))
+
+#define for_each_replicas_entry_v0(_r, _i) \
+ for (_i = (_r)->entries; \
+ (void *) (_i) < vstruct_end(&(_r)->field) && (_i)->data_type;\
+ (_i) = replicas_entry_next(_i))
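+
+/*
+ * Example (sketch; "sb" and "out" are hypothetical): dumping every replicas
+ * entry in a superblock, mirroring bch2_sb_replicas_to_text():
+ *
+ *	struct bch_sb_field_replicas *r = bch2_sb_field_get(sb, replicas);
+ *	struct bch_replicas_entry *e;
+ *
+ *	if (r)
+ *		for_each_replicas_entry(r, e)
+ *			bch2_replicas_entry_to_text(out, e);
+ */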
+
+int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_replicas;
+extern const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0;
+
+void bch2_fs_replicas_exit(struct bch_fs *);
+int bch2_fs_replicas_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_REPLICAS_H */
diff --git a/fs/bcachefs/replicas_types.h b/fs/bcachefs/replicas_types.h
new file mode 100644
index 000000000000..5cfff489bbc3
--- /dev/null
+++ b/fs/bcachefs/replicas_types.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_REPLICAS_TYPES_H
+#define _BCACHEFS_REPLICAS_TYPES_H
+
+struct bch_replicas_cpu {
+ unsigned nr;
+ unsigned entry_size;
+ struct bch_replicas_entry *entries;
+};
+
+struct replicas_delta {
+ s64 delta;
+ struct bch_replicas_entry r;
+} __packed;
+
+struct replicas_delta_list {
+ unsigned size;
+ unsigned used;
+
+ struct {} memset_start;
+ u64 nr_inodes;
+ u64 persistent_reserved[BCH_REPLICAS_MAX];
+ struct {} memset_end;
+ struct replicas_delta d[0];
+};
+
+#endif /* _BCACHEFS_REPLICAS_TYPES_H */
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
new file mode 100644
index 000000000000..61203d7c8d36
--- /dev/null
+++ b/fs/bcachefs/sb-clean.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "error.h"
+#include "journal_io.h"
+#include "replicas.h"
+#include "sb-clean.h"
+#include "super-io.h"
+
+/*
+ * BCH_SB_FIELD_clean:
+ *
+ * Btree roots, and a few other things, are recovered from the journal after an
+ * unclean shutdown - but after a clean shutdown, to avoid having to read the
+ * journal, we can store them in the superblock.
+ *
+ * bch_sb_field_clean simply contains a list of journal entries, stored exactly
+ * as they would be in the journal:
+ */
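+
+/*
+ * For illustration, walking a clean section uses the same vstruct iteration
+ * as the journal ("process_entry" is a hypothetical callback):
+ *
+ *	for (entry = clean->start;
+ *	     entry < (struct jset_entry *) vstruct_end(&clean->field);
+ *	     entry = vstruct_next(entry))
+ *		process_entry(entry);
+ */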
+
+int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean,
+ int write)
+{
+ struct jset_entry *entry;
+ int ret;
+
+ for (entry = clean->start;
+ entry < (struct jset_entry *) vstruct_end(&clean->field);
+ entry = vstruct_next(entry)) {
+ ret = bch2_journal_entry_validate(c, NULL, entry,
+ le16_to_cpu(c->disk_sb.sb->version),
+ BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
+ write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct bkey_i *btree_root_find(struct bch_fs *c,
+ struct bch_sb_field_clean *clean,
+ struct jset *j,
+ enum btree_id id, unsigned *level)
+{
+ struct bkey_i *k;
+ struct jset_entry *entry, *start, *end;
+
+ if (clean) {
+ start = clean->start;
+ end = vstruct_end(&clean->field);
+ } else {
+ start = j->start;
+ end = vstruct_last(j);
+ }
+
+ for (entry = start; entry < end; entry = vstruct_next(entry))
+ if (entry->type == BCH_JSET_ENTRY_btree_root &&
+ entry->btree_id == id)
+ goto found;
+
+ return NULL;
+found:
+ if (!entry->u64s)
+ return ERR_PTR(-EINVAL);
+
+ k = entry->start;
+ *level = entry->level;
+ return k;
+}
+
+int bch2_verify_superblock_clean(struct bch_fs *c,
+ struct bch_sb_field_clean **cleanp,
+ struct jset *j)
+{
+ unsigned i;
+ struct bch_sb_field_clean *clean = *cleanp;
+ struct printbuf buf1 = PRINTBUF;
+ struct printbuf buf2 = PRINTBUF;
+ int ret = 0;
+
+ if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
+ "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
+ le64_to_cpu(clean->journal_seq),
+ le64_to_cpu(j->seq))) {
+ kfree(clean);
+ *cleanp = NULL;
+ return 0;
+ }
+
+ for (i = 0; i < BTREE_ID_NR; i++) {
+ struct bkey_i *k1, *k2;
+ unsigned l1 = 0, l2 = 0;
+
+ k1 = btree_root_find(c, clean, NULL, i, &l1);
+ k2 = btree_root_find(c, NULL, j, i, &l2);
+
+ if (!k1 && !k2)
+ continue;
+
+ printbuf_reset(&buf1);
+ printbuf_reset(&buf2);
+
+ if (k1)
+ bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
+ else
+ prt_printf(&buf1, "(none)");
+
+ if (k2)
+ bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
+ else
+ prt_printf(&buf2, "(none)");
+
+ mustfix_fsck_err_on(!k1 || !k2 ||
+ IS_ERR(k1) ||
+ IS_ERR(k2) ||
+ k1->k.u64s != k2->k.u64s ||
+ memcmp(k1, k2, bkey_bytes(&k1->k)) ||
+ l1 != l2, c,
+ "superblock btree root %u doesn't match journal after clean shutdown\n"
+ "sb: l=%u %s\n"
+ "journal: l=%u %s\n", i,
+ l1, buf1.buf,
+ l2, buf2.buf);
+ }
+fsck_err:
+ printbuf_exit(&buf2);
+ printbuf_exit(&buf1);
+ return ret;
+}
+
+struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
+{
+ struct bch_sb_field_clean *clean, *sb_clean;
+ int ret;
+
+ mutex_lock(&c->sb_lock);
+ sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean);
+
+ if (fsck_err_on(!sb_clean, c,
+ "superblock marked clean but clean section not present")) {
+ SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
+ c->sb.clean = false;
+ mutex_unlock(&c->sb_lock);
+ return NULL;
+ }
+
+ clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
+ GFP_KERNEL);
+ if (!clean) {
+ mutex_unlock(&c->sb_lock);
+ return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
+ }
+
+ ret = bch2_sb_clean_validate_late(c, clean, READ);
+ if (ret) {
+ mutex_unlock(&c->sb_lock);
+ return ERR_PTR(ret);
+ }
+
+ mutex_unlock(&c->sb_lock);
+
+ return clean;
+fsck_err:
+ mutex_unlock(&c->sb_lock);
+ return ERR_PTR(ret);
+}
+
+static struct jset_entry *jset_entry_init(struct jset_entry **end, size_t size)
+{
+ struct jset_entry *entry = *end;
+ unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
+
+ memset(entry, 0, u64s * sizeof(u64));
+ /*
+ * The u64s field counts from the start of data, ignoring the shared
+ * fields.
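+	 * E.g. a 16 byte entry (8 byte header + one u64 of data) stores u64s == 1.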
+ */
+ entry->u64s = cpu_to_le16(u64s - 1);
+
+ *end = vstruct_next(*end);
+ return entry;
+}
+
+void bch2_journal_super_entries_add_common(struct bch_fs *c,
+ struct jset_entry **end,
+ u64 journal_seq)
+{
+ struct bch_dev *ca;
+ unsigned i, dev;
+
+ percpu_down_read(&c->mark_lock);
+
+ if (!journal_seq) {
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ bch2_fs_usage_acc_to_base(c, i);
+ } else {
+ bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
+ }
+
+ {
+ struct jset_entry_usage *u =
+ container_of(jset_entry_init(end, sizeof(*u)),
+ struct jset_entry_usage, entry);
+
+ u->entry.type = BCH_JSET_ENTRY_usage;
+ u->entry.btree_id = BCH_FS_USAGE_inodes;
+ u->v = cpu_to_le64(c->usage_base->nr_inodes);
+ }
+
+ {
+ struct jset_entry_usage *u =
+ container_of(jset_entry_init(end, sizeof(*u)),
+ struct jset_entry_usage, entry);
+
+ u->entry.type = BCH_JSET_ENTRY_usage;
+ u->entry.btree_id = BCH_FS_USAGE_key_version;
+ u->v = cpu_to_le64(atomic64_read(&c->key_version));
+ }
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ struct jset_entry_usage *u =
+ container_of(jset_entry_init(end, sizeof(*u)),
+ struct jset_entry_usage, entry);
+
+ u->entry.type = BCH_JSET_ENTRY_usage;
+ u->entry.btree_id = BCH_FS_USAGE_reserved;
+ u->entry.level = i;
+ u->v = cpu_to_le64(c->usage_base->persistent_reserved[i]);
+ }
+
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+ struct jset_entry_data_usage *u =
+ container_of(jset_entry_init(end, sizeof(*u) + e->nr_devs),
+ struct jset_entry_data_usage, entry);
+
+ u->entry.type = BCH_JSET_ENTRY_data_usage;
+ u->v = cpu_to_le64(c->usage_base->replicas[i]);
+ unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
+ "embedded variable length struct");
+ }
+
+ for_each_member_device(ca, c, dev) {
+ unsigned b = sizeof(struct jset_entry_dev_usage) +
+ sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
+ struct jset_entry_dev_usage *u =
+ container_of(jset_entry_init(end, b),
+ struct jset_entry_dev_usage, entry);
+
+ u->entry.type = BCH_JSET_ENTRY_dev_usage;
+ u->dev = cpu_to_le32(dev);
+ u->buckets_ec = cpu_to_le64(ca->usage_base->buckets_ec);
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
+ u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
+ u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
+ }
+ }
+
+ percpu_up_read(&c->mark_lock);
+
+ for (i = 0; i < 2; i++) {
+ struct jset_entry_clock *clock =
+ container_of(jset_entry_init(end, sizeof(*clock)),
+ struct jset_entry_clock, entry);
+
+ clock->entry.type = BCH_JSET_ENTRY_clock;
+ clock->rw = i;
+ clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now));
+ }
+}
+
+static int bch2_sb_clean_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_clean *clean = field_to_type(f, clean);
+
+ if (vstruct_bytes(&clean->field) < sizeof(*clean)) {
+ prt_printf(err, "wrong size (got %zu should be %zu)",
+ vstruct_bytes(&clean->field), sizeof(*clean));
+ return -BCH_ERR_invalid_sb_clean;
+ }
+
+ return 0;
+}
+
+static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_clean *clean = field_to_type(f, clean);
+ struct jset_entry *entry;
+
+ prt_printf(out, "flags: %x", le32_to_cpu(clean->flags));
+ prt_newline(out);
+ prt_printf(out, "journal_seq: %llu", le64_to_cpu(clean->journal_seq));
+ prt_newline(out);
+
+ for (entry = clean->start;
+ entry != vstruct_end(&clean->field);
+ entry = vstruct_next(entry)) {
+ if (entry->type == BCH_JSET_ENTRY_btree_keys &&
+ !entry->u64s)
+ continue;
+
+ bch2_journal_entry_to_text(out, NULL, entry);
+ prt_newline(out);
+ }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_clean = {
+ .validate = bch2_sb_clean_validate,
+ .to_text = bch2_sb_clean_to_text,
+};
+
+int bch2_fs_mark_dirty(struct bch_fs *c)
+{
+ int ret;
+
+ /*
+ * Unconditionally write superblock, to verify it hasn't changed before
+ * we go rw:
+ */
+
+ mutex_lock(&c->sb_lock);
+ SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
+
+ bch2_sb_maybe_downgrade(c);
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
+
+ ret = bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ return ret;
+}
+
+void bch2_fs_mark_clean(struct bch_fs *c)
+{
+ struct bch_sb_field_clean *sb_clean;
+ struct jset_entry *entry;
+ unsigned u64s;
+ int ret;
+
+ mutex_lock(&c->sb_lock);
+ if (BCH_SB_CLEAN(c->disk_sb.sb))
+ goto out;
+
+ SET_BCH_SB_CLEAN(c->disk_sb.sb, true);
+
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
+ c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata);
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates));
+ c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled));
+
+ u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved;
+
+ sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s);
+ if (!sb_clean) {
+ bch_err(c, "error resizing superblock while setting filesystem clean");
+ goto out;
+ }
+
+ sb_clean->flags = 0;
+ sb_clean->journal_seq = cpu_to_le64(atomic64_read(&c->journal.seq));
+
+ /* Trying to catch outstanding bug: */
+ BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX);
+
+ entry = sb_clean->start;
+ bch2_journal_super_entries_add_common(c, &entry, 0);
+ entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
+ BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
+
+ memset(entry, 0,
+ vstruct_end(&sb_clean->field) - (void *) entry);
+
+ /*
+ * this should be in the write path, and we should be validating every
+ * superblock section:
+ */
+ ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE);
+ if (ret) {
+		bch_err(c, "error marking filesystem clean: validate error");
+ goto out;
+ }
+
+ bch2_write_super(c);
+out:
+ mutex_unlock(&c->sb_lock);
+}
diff --git a/fs/bcachefs/sb-clean.h b/fs/bcachefs/sb-clean.h
new file mode 100644
index 000000000000..71caef281239
--- /dev/null
+++ b/fs/bcachefs/sb-clean.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_CLEAN_H
+#define _BCACHEFS_SB_CLEAN_H
+
+int bch2_sb_clean_validate_late(struct bch_fs *, struct bch_sb_field_clean *, int);
+int bch2_verify_superblock_clean(struct bch_fs *, struct bch_sb_field_clean **,
+ struct jset *);
+struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *);
+void bch2_journal_super_entries_add_common(struct bch_fs *, struct jset_entry **, u64);
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_clean;
+
+int bch2_fs_mark_dirty(struct bch_fs *);
+void bch2_fs_mark_clean(struct bch_fs *);
+
+#endif /* _BCACHEFS_SB_CLEAN_H */
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
new file mode 100644
index 000000000000..6dd85bb996fe
--- /dev/null
+++ b/fs/bcachefs/sb-members.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "disk_groups.h"
+#include "opts.h"
+#include "replicas.h"
+#include "sb-members.h"
+#include "super-io.h"
+
+/* Code for bch_sb_field_members_v1 and bch_sb_field_members_v2: */
+
+static struct bch_member *members_v2_get_mut(struct bch_sb_field_members_v2 *mi, int i)
+{
+ return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
+}
+
+struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i)
+{
+ return members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
+}
+
+static struct bch_member members_v2_get(struct bch_sb_field_members_v2 *mi, int i)
+{
+ struct bch_member ret, *p = members_v2_get_mut(mi, i);
+ memset(&ret, 0, sizeof(ret));
+ memcpy(&ret, p, min_t(size_t, le16_to_cpu(mi->member_bytes), sizeof(ret)));
+ return ret;
+}
+
+static struct bch_member *members_v1_get_mut(struct bch_sb_field_members_v1 *mi, int i)
+{
+ return (void *) mi->_members + (i * BCH_MEMBER_V1_BYTES);
+}
+
+static struct bch_member members_v1_get(struct bch_sb_field_members_v1 *mi, int i)
+{
+ struct bch_member ret, *p = members_v1_get_mut(mi, i);
+ memset(&ret, 0, sizeof(ret));
+	memcpy(&ret, p, min_t(size_t, sizeof(struct bch_member), sizeof(ret)));
+	return ret;
+}
+
+struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i)
+{
+ struct bch_sb_field_members_v2 *mi2 = bch2_sb_field_get(sb, members_v2);
+ if (mi2)
+ return members_v2_get(mi2, i);
+ struct bch_sb_field_members_v1 *mi1 = bch2_sb_field_get(sb, members_v1);
+ return members_v1_get(mi1, i);
+}
+
+static int sb_members_v2_resize_entries(struct bch_fs *c)
+{
+ struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+
+ if (le16_to_cpu(mi->member_bytes) < sizeof(struct bch_member)) {
+ unsigned u64s = DIV_ROUND_UP((sizeof(*mi) + sizeof(mi->_members[0]) *
+ c->disk_sb.sb->nr_devices), 8);
+
+ mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
+ if (!mi)
+ return -BCH_ERR_ENOSPC_sb_members_v2;
+
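+		/* walk backwards so entries that haven't been widened yet aren't overwritten */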
+ for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) {
+ void *dst = (void *) mi->_members + (i * sizeof(struct bch_member));
+ memmove(dst, members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
+ memset(dst + le16_to_cpu(mi->member_bytes),
+ 0, (sizeof(struct bch_member) - le16_to_cpu(mi->member_bytes)));
+ }
+ mi->member_bytes = cpu_to_le16(sizeof(struct bch_member));
+ }
+ return 0;
+}
+
+int bch2_members_v2_init(struct bch_fs *c)
+{
+ struct bch_sb_field_members_v1 *mi1;
+ struct bch_sb_field_members_v2 *mi2;
+
+ if (!bch2_sb_field_get(c->disk_sb.sb, members_v2)) {
+ mi2 = bch2_sb_field_resize(&c->disk_sb, members_v2,
+ DIV_ROUND_UP(sizeof(*mi2) +
+ sizeof(struct bch_member) * c->sb.nr_devices,
+ sizeof(u64)));
+ mi1 = bch2_sb_field_get(c->disk_sb.sb, members_v1);
+ memcpy(&mi2->_members[0], &mi1->_members[0],
+ BCH_MEMBER_V1_BYTES * c->sb.nr_devices);
+ memset(&mi2->pad[0], 0, sizeof(mi2->pad));
+ mi2->member_bytes = cpu_to_le16(BCH_MEMBER_V1_BYTES);
+ }
+
+ return sb_members_v2_resize_entries(c);
+}
+
+int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
+{
+ struct bch_sb_field_members_v1 *mi1;
+ struct bch_sb_field_members_v2 *mi2;
+
+ mi1 = bch2_sb_field_resize(disk_sb, members_v1,
+ DIV_ROUND_UP(sizeof(*mi1) + BCH_MEMBER_V1_BYTES *
+ disk_sb->sb->nr_devices, sizeof(u64)));
+ if (!mi1)
+ return -BCH_ERR_ENOSPC_sb_members;
+
+ mi2 = bch2_sb_field_get(disk_sb->sb, members_v2);
+
+ for (unsigned i = 0; i < disk_sb->sb->nr_devices; i++)
+ memcpy(members_v1_get_mut(mi1, i), members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);
+
+ return 0;
+}
+
+static int validate_member(struct printbuf *err,
+ struct bch_member m,
+ struct bch_sb *sb,
+ int i)
+{
+ if (le64_to_cpu(m.nbuckets) > LONG_MAX) {
+ prt_printf(err, "device %u: too many buckets (got %llu, max %lu)",
+ i, le64_to_cpu(m.nbuckets), LONG_MAX);
+ return -BCH_ERR_invalid_sb_members;
+ }
+
+ if (le64_to_cpu(m.nbuckets) -
+ le16_to_cpu(m.first_bucket) < BCH_MIN_NR_NBUCKETS) {
+		prt_printf(err, "device %u: not enough buckets (got %llu, min %u)",
+ i, le64_to_cpu(m.nbuckets), BCH_MIN_NR_NBUCKETS);
+ return -BCH_ERR_invalid_sb_members;
+ }
+
+ if (le16_to_cpu(m.bucket_size) <
+ le16_to_cpu(sb->block_size)) {
+ prt_printf(err, "device %u: bucket size %u smaller than block size %u",
+ i, le16_to_cpu(m.bucket_size), le16_to_cpu(sb->block_size));
+ return -BCH_ERR_invalid_sb_members;
+ }
+
+ if (le16_to_cpu(m.bucket_size) <
+ BCH_SB_BTREE_NODE_SIZE(sb)) {
+ prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
+ i, le16_to_cpu(m.bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
+ return -BCH_ERR_invalid_sb_members;
+ }
+
+ return 0;
+}
+
+static void member_to_text(struct printbuf *out,
+ struct bch_member m,
+ struct bch_sb_field_disk_groups *gi,
+ struct bch_sb *sb,
+ int i)
+{
+ unsigned data_have = bch2_sb_dev_has_data(sb, i);
+ u64 bucket_size = le16_to_cpu(m.bucket_size);
+ u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
+
+ prt_printf(out, "Device:");
+ prt_tab(out);
+ prt_printf(out, "%u", i);
+ prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "UUID:");
+ prt_tab(out);
+ pr_uuid(out, m.uuid.b);
+ prt_newline(out);
+
+ prt_printf(out, "Size:");
+ prt_tab(out);
+ prt_units_u64(out, device_size << 9);
+ prt_newline(out);
+
+ for (unsigned i = 0; i < BCH_IOPS_NR; i++) {
+ prt_printf(out, "%s iops:", bch2_iops_measurements[i]);
+ prt_tab(out);
+ prt_printf(out, "%u", le32_to_cpu(m.iops[i]));
+ prt_newline(out);
+ }
+
+ prt_printf(out, "Bucket size:");
+ prt_tab(out);
+ prt_units_u64(out, bucket_size << 9);
+ prt_newline(out);
+
+ prt_printf(out, "First bucket:");
+ prt_tab(out);
+ prt_printf(out, "%u", le16_to_cpu(m.first_bucket));
+ prt_newline(out);
+
+ prt_printf(out, "Buckets:");
+ prt_tab(out);
+ prt_printf(out, "%llu", le64_to_cpu(m.nbuckets));
+ prt_newline(out);
+
+ prt_printf(out, "Last mount:");
+ prt_tab(out);
+ if (m.last_mount)
+ pr_time(out, le64_to_cpu(m.last_mount));
+ else
+ prt_printf(out, "(never)");
+ prt_newline(out);
+
+ prt_printf(out, "State:");
+ prt_tab(out);
+ prt_printf(out, "%s",
+ BCH_MEMBER_STATE(&m) < BCH_MEMBER_STATE_NR
+ ? bch2_member_states[BCH_MEMBER_STATE(&m)]
+ : "unknown");
+ prt_newline(out);
+
+ prt_printf(out, "Label:");
+ prt_tab(out);
+ if (BCH_MEMBER_GROUP(&m)) {
+ unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
+
+ if (idx < disk_groups_nr(gi))
+ prt_printf(out, "%s (%u)",
+ gi->entries[idx].label, idx);
+ else
+ prt_printf(out, "(bad disk labels section)");
+ } else {
+ prt_printf(out, "(none)");
+ }
+ prt_newline(out);
+
+ prt_printf(out, "Data allowed:");
+ prt_tab(out);
+ if (BCH_MEMBER_DATA_ALLOWED(&m))
+ prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
+ else
+ prt_printf(out, "(none)");
+ prt_newline(out);
+
+ prt_printf(out, "Has data:");
+ prt_tab(out);
+ if (data_have)
+ prt_bitflags(out, bch2_data_types, data_have);
+ else
+ prt_printf(out, "(none)");
+ prt_newline(out);
+
+ prt_printf(out, "Discard:");
+ prt_tab(out);
+ prt_printf(out, "%llu", BCH_MEMBER_DISCARD(&m));
+ prt_newline(out);
+
+ prt_printf(out, "Freespace initialized:");
+ prt_tab(out);
+ prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(&m));
+ prt_newline(out);
+
+ printbuf_indent_sub(out, 2);
+}
+
+static int bch2_sb_members_v1_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
+ unsigned i;
+
+ if ((void *) members_v1_get_mut(mi, sb->nr_devices) >
+ vstruct_end(&mi->field)) {
+ prt_printf(err, "too many devices for section size");
+ return -BCH_ERR_invalid_sb_members;
+ }
+
+ for (i = 0; i < sb->nr_devices; i++) {
+ struct bch_member m = members_v1_get(mi, i);
+
+ int ret = validate_member(err, m, sb, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
+ struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
+ unsigned i;
+
+ for (i = 0; i < sb->nr_devices; i++) {
+ struct bch_member m = members_v1_get(mi, i);
+ member_to_text(out, m, gi, sb, i);
+ }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_members_v1 = {
+ .validate = bch2_sb_members_v1_validate,
+ .to_text = bch2_sb_members_v1_to_text,
+};
+
+static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
+ struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
+ unsigned i;
+
+ for (i = 0; i < sb->nr_devices; i++) {
+ struct bch_member m = members_v2_get(mi, i);
+ member_to_text(out, m, gi, sb, i);
+ }
+}
+
+static int bch2_sb_members_v2_validate(struct bch_sb *sb,
+ struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
+ size_t mi_bytes = (void *) members_v2_get_mut(mi, sb->nr_devices) -
+ (void *) mi;
+
+ if (mi_bytes > vstruct_bytes(&mi->field)) {
+ prt_printf(err, "section too small (%zu > %zu)",
+ mi_bytes, vstruct_bytes(&mi->field));
+ return -BCH_ERR_invalid_sb_members;
+ }
+
+ for (unsigned i = 0; i < sb->nr_devices; i++) {
+ int ret = validate_member(err, members_v2_get(mi, i), sb, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
+ .validate = bch2_sb_members_v2_validate,
+ .to_text = bch2_sb_members_v2_to_text,
+};
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
new file mode 100644
index 000000000000..430f3457bfd4
--- /dev/null
+++ b/fs/bcachefs/sb-members.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_MEMBERS_H
+#define _BCACHEFS_SB_MEMBERS_H
+
+int bch2_members_v2_init(struct bch_fs *c);
+int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
+struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
+struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
+
+static inline bool bch2_dev_is_online(struct bch_dev *ca)
+{
+ return !percpu_ref_is_zero(&ca->io_ref);
+}
+
+static inline bool bch2_dev_is_readable(struct bch_dev *ca)
+{
+ return bch2_dev_is_online(ca) &&
+ ca->mi.state != BCH_MEMBER_STATE_failed;
+}
+
+static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
+{
+ if (!percpu_ref_tryget(&ca->io_ref))
+ return false;
+
+ if (ca->mi.state == BCH_MEMBER_STATE_rw ||
+ (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
+ return true;
+
+ percpu_ref_put(&ca->io_ref);
+ return false;
+}
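+
+/*
+ * Typical pattern (sketch; do_io() is a hypothetical helper): take an io ref
+ * before issuing IO to a device, drop it when done:
+ *
+ *	if (bch2_dev_get_ioref(ca, READ)) {
+ *		do_io(ca);
+ *		percpu_ref_put(&ca->io_ref);
+ *	}
+ */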
+
+static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
+{
+ return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
+}
+
+static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
+ unsigned dev)
+{
+ unsigned i;
+
+ for (i = 0; i < devs.nr; i++)
+ if (devs.devs[i] == dev)
+ return true;
+
+ return false;
+}
+
+static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
+ unsigned dev)
+{
+ unsigned i;
+
+ for (i = 0; i < devs->nr; i++)
+ if (devs->devs[i] == dev) {
+ array_remove_item(devs->devs, devs->nr, i);
+ return;
+ }
+}
+
+static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
+ unsigned dev)
+{
+ if (!bch2_dev_list_has_dev(*devs, dev)) {
+ BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
+ devs->devs[devs->nr++] = dev;
+ }
+}
+
+static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
+{
+ return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
+}
+
+static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
+ const struct bch_devs_mask *mask)
+{
+ struct bch_dev *ca = NULL;
+
+ while ((*iter = mask
+ ? find_next_bit(mask->d, c->sb.nr_devices, *iter)
+ : *iter) < c->sb.nr_devices &&
+ !(ca = rcu_dereference_check(c->devs[*iter],
+ lockdep_is_held(&c->state_lock))))
+ (*iter)++;
+
+ return ca;
+}
+
+#define for_each_member_device_rcu(ca, c, iter, mask) \
+ for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)
+
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
+{
+ struct bch_dev *ca;
+
+ rcu_read_lock();
+ if ((ca = __bch2_next_dev(c, iter, NULL)))
+ percpu_ref_get(&ca->ref);
+ rcu_read_unlock();
+
+ return ca;
+}
+
+/*
+ * If you break early, you must drop your ref on the current device
+ */
+#define for_each_member_device(ca, c, iter) \
+ for ((iter) = 0; \
+ (ca = bch2_get_next_dev(c, &(iter))); \
+ percpu_ref_put(&ca->ref), (iter)++)
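+
+/*
+ * Example of a correct early break (sketch; "want" is hypothetical): the ref
+ * taken by bch2_get_next_dev() must be dropped manually:
+ *
+ *	for_each_member_device(ca, c, i)
+ *		if (ca->dev_idx == want) {
+ *			percpu_ref_put(&ca->ref);
+ *			break;
+ *		}
+ */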
+
+static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
+ unsigned *iter,
+ int state_mask)
+{
+ struct bch_dev *ca;
+
+ rcu_read_lock();
+ while ((ca = __bch2_next_dev(c, iter, NULL)) &&
+ (!((1 << ca->mi.state) & state_mask) ||
+ !percpu_ref_tryget(&ca->io_ref)))
+ (*iter)++;
+ rcu_read_unlock();
+
+ return ca;
+}
+
+#define __for_each_online_member(ca, c, iter, state_mask) \
+ for ((iter) = 0; \
+ (ca = bch2_get_next_online_dev(c, &(iter), state_mask)); \
+ percpu_ref_put(&ca->io_ref), (iter)++)
+
+#define for_each_online_member(ca, c, iter) \
+ __for_each_online_member(ca, c, iter, ~0)
+
+#define for_each_rw_member(ca, c, iter) \
+ __for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
+
+#define for_each_readable_member(ca, c, iter) \
+ __for_each_online_member(ca, c, iter, \
+ (1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
+
+/*
+ * If a key exists that references a device, the device won't be going away and
+ * we can omit rcu_read_lock():
+ */
+static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
+{
+ EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+
+ return rcu_dereference_check(c->devs[idx], 1);
+}
+
+static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
+{
+ EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+
+ return rcu_dereference_protected(c->devs[idx],
+ lockdep_is_held(&c->sb_lock) ||
+ lockdep_is_held(&c->state_lock));
+}
+
+/* XXX kill, move to struct bch_fs */
+static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
+{
+ struct bch_devs_mask devs;
+ struct bch_dev *ca;
+ unsigned i;
+
+ memset(&devs, 0, sizeof(devs));
+ for_each_online_member(ca, c, i)
+ __set_bit(ca->dev_idx, devs.d);
+ return devs;
+}
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
+extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;
+
+#endif /* _BCACHEFS_SB_MEMBERS_H */
diff --git a/fs/bcachefs/seqmutex.h b/fs/bcachefs/seqmutex.h
new file mode 100644
index 000000000000..c1860d8163fb
--- /dev/null
+++ b/fs/bcachefs/seqmutex.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SEQMUTEX_H
+#define _BCACHEFS_SEQMUTEX_H
+
+#include <linux/mutex.h>
+
+struct seqmutex {
+ struct mutex lock;
+ u32 seq;
+};
+
+#define seqmutex_init(_lock) mutex_init(&(_lock)->lock)
+
+static inline bool seqmutex_trylock(struct seqmutex *lock)
+{
+ return mutex_trylock(&lock->lock);
+}
+
+static inline void seqmutex_lock(struct seqmutex *lock)
+{
+ mutex_lock(&lock->lock);
+}
+
+static inline void seqmutex_unlock(struct seqmutex *lock)
+{
+ lock->seq++;
+ mutex_unlock(&lock->lock);
+}
+
+static inline u32 seqmutex_seq(struct seqmutex *lock)
+{
+ return lock->seq;
+}
+
+static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
+{
+ if (lock->seq != seq || !mutex_trylock(&lock->lock))
+ return false;
+
+ if (lock->seq != seq) {
+ mutex_unlock(&lock->lock);
+ return false;
+ }
+
+ return true;
+}
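+
+/*
+ * Usage sketch (illustrative only): because seqmutex_unlock() bumps the
+ * sequence number, sample it after dropping the lock; seqmutex_relock() then
+ * only succeeds if nobody else released the lock since the sample:
+ *
+ *	seqmutex_unlock(&m);
+ *	seq = seqmutex_seq(&m);
+ *	... do blocking work ...
+ *	if (!seqmutex_relock(&m, seq))
+ *		goto restart;
+ */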
+
+#endif /* _BCACHEFS_SEQMUTEX_H */
diff --git a/fs/bcachefs/siphash.c b/fs/bcachefs/siphash.c
new file mode 100644
index 000000000000..dc1a27cc31cd
--- /dev/null
+++ b/fs/bcachefs/siphash.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* $OpenBSD: siphash.c,v 1.3 2015/02/20 11:51:03 tedu Exp $ */
+
+/*-
+ * Copyright (c) 2013 Andre Oppermann <andre@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * SipHash is a family of PRFs SipHash-c-d where the integer parameters c and d
+ * are the number of compression rounds and the number of finalization rounds.
+ * A compression round is identical to a finalization round and this round
+ * function is called SipRound. Given a 128-bit key k and a (possibly empty)
+ * byte string m, SipHash-c-d returns a 64-bit value SipHash-c-d(k; m).
+ *
+ * Implemented from the paper "SipHash: a fast short-input PRF", 2012.09.18,
+ * by Jean-Philippe Aumasson and Daniel J. Bernstein,
+ * Permanent Document ID b9a943a805fbfc6fde808af9fc0ecdfa
+ * https://131002.net/siphash/siphash.pdf
+ * https://131002.net/siphash/
+ */
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+
+#include "siphash.h"
+
+static void SipHash_Rounds(SIPHASH_CTX *ctx, int rounds)
+{
+ while (rounds--) {
+ ctx->v[0] += ctx->v[1];
+ ctx->v[2] += ctx->v[3];
+ ctx->v[1] = rol64(ctx->v[1], 13);
+ ctx->v[3] = rol64(ctx->v[3], 16);
+
+ ctx->v[1] ^= ctx->v[0];
+ ctx->v[3] ^= ctx->v[2];
+ ctx->v[0] = rol64(ctx->v[0], 32);
+
+ ctx->v[2] += ctx->v[1];
+ ctx->v[0] += ctx->v[3];
+ ctx->v[1] = rol64(ctx->v[1], 17);
+ ctx->v[3] = rol64(ctx->v[3], 21);
+
+ ctx->v[1] ^= ctx->v[2];
+ ctx->v[3] ^= ctx->v[0];
+ ctx->v[2] = rol64(ctx->v[2], 32);
+ }
+}
+
+static void SipHash_CRounds(SIPHASH_CTX *ctx, const void *ptr, int rounds)
+{
+ u64 m = get_unaligned_le64(ptr);
+
+ ctx->v[3] ^= m;
+ SipHash_Rounds(ctx, rounds);
+ ctx->v[0] ^= m;
+}
+
+void SipHash_Init(SIPHASH_CTX *ctx, const SIPHASH_KEY *key)
+{
+ u64 k0, k1;
+
+ k0 = le64_to_cpu(key->k0);
+ k1 = le64_to_cpu(key->k1);
+
+ ctx->v[0] = 0x736f6d6570736575ULL ^ k0;
+ ctx->v[1] = 0x646f72616e646f6dULL ^ k1;
+ ctx->v[2] = 0x6c7967656e657261ULL ^ k0;
+ ctx->v[3] = 0x7465646279746573ULL ^ k1;
+
+ memset(ctx->buf, 0, sizeof(ctx->buf));
+ ctx->bytes = 0;
+}
+
+void SipHash_Update(SIPHASH_CTX *ctx, int rc, int rf,
+ const void *src, size_t len)
+{
+ const u8 *ptr = src;
+ size_t left, used;
+
+ if (len == 0)
+ return;
+
+ used = ctx->bytes % sizeof(ctx->buf);
+ ctx->bytes += len;
+
+ if (used > 0) {
+ left = sizeof(ctx->buf) - used;
+
+ if (len >= left) {
+ memcpy(&ctx->buf[used], ptr, left);
+ SipHash_CRounds(ctx, ctx->buf, rc);
+ len -= left;
+			ptr += left;
+			used = 0;
+ } else {
+ memcpy(&ctx->buf[used], ptr, len);
+ return;
+ }
+ }
+
+ while (len >= sizeof(ctx->buf)) {
+ SipHash_CRounds(ctx, ptr, rc);
+ len -= sizeof(ctx->buf);
+ ptr += sizeof(ctx->buf);
+ }
+
+ if (len > 0)
+ memcpy(&ctx->buf[used], ptr, len);
+}
+
+void SipHash_Final(void *dst, SIPHASH_CTX *ctx, int rc, int rf)
+{
+ u64 r;
+
+ r = SipHash_End(ctx, rc, rf);
+
+ *((__le64 *) dst) = cpu_to_le64(r);
+}
+
+u64 SipHash_End(SIPHASH_CTX *ctx, int rc, int rf)
+{
+ u64 r;
+ size_t left, used;
+
+ used = ctx->bytes % sizeof(ctx->buf);
+ left = sizeof(ctx->buf) - used;
+ memset(&ctx->buf[used], 0, left - 1);
+ ctx->buf[7] = ctx->bytes;
+
+ SipHash_CRounds(ctx, ctx->buf, rc);
+ ctx->v[2] ^= 0xff;
+ SipHash_Rounds(ctx, rf);
+
+ r = (ctx->v[0] ^ ctx->v[1]) ^ (ctx->v[2] ^ ctx->v[3]);
+ memset(ctx, 0, sizeof(*ctx));
+ return r;
+}
+
+u64 SipHash(const SIPHASH_KEY *key, int rc, int rf, const void *src, size_t len)
+{
+ SIPHASH_CTX ctx;
+
+ SipHash_Init(&ctx, key);
+ SipHash_Update(&ctx, rc, rf, src, len);
+ return SipHash_End(&ctx, rc, rf);
+}
diff --git a/fs/bcachefs/siphash.h b/fs/bcachefs/siphash.h
new file mode 100644
index 000000000000..3dfaf34a43b2
--- /dev/null
+++ b/fs/bcachefs/siphash.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* $OpenBSD: siphash.h,v 1.5 2015/02/20 11:51:03 tedu Exp $ */
+/*-
+ * Copyright (c) 2013 Andre Oppermann <andre@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * SipHash is a family of pseudorandom functions (a.k.a. keyed hash functions)
+ * optimized for speed on short messages, returning a 64-bit hash/digest value.
+ *
+ * The number of rounds is defined during the initialization:
+ * SipHash24_Init() for the fast and reasonably strong version
+ * SipHash48_Init() for the strong version (half as fast)
+ *
+ * struct SIPHASH_CTX ctx;
+ * SipHash24_Init(&ctx);
+ * SipHash_SetKey(&ctx, "16bytes long key");
+ * SipHash_Update(&ctx, pointer_to_string, length_of_string);
+ * SipHash_Final(output, &ctx);
+ */
+
+#ifndef _SIPHASH_H_
+#define _SIPHASH_H_
+
+#include <linux/types.h>
+
+#define SIPHASH_BLOCK_LENGTH 8
+#define SIPHASH_KEY_LENGTH 16
+#define SIPHASH_DIGEST_LENGTH 8
+
+typedef struct _SIPHASH_CTX {
+ u64 v[4];
+ u8 buf[SIPHASH_BLOCK_LENGTH];
+ u32 bytes;
+} SIPHASH_CTX;
+
+typedef struct {
+ __le64 k0;
+ __le64 k1;
+} SIPHASH_KEY;
+
+void SipHash_Init(SIPHASH_CTX *, const SIPHASH_KEY *);
+void SipHash_Update(SIPHASH_CTX *, int, int, const void *, size_t);
+u64 SipHash_End(SIPHASH_CTX *, int, int);
+void SipHash_Final(void *, SIPHASH_CTX *, int, int);
+u64 SipHash(const SIPHASH_KEY *, int, int, const void *, size_t);
+
+#define SipHash24_Init(_c, _k) SipHash_Init((_c), (_k))
+#define SipHash24_Update(_c, _p, _l) SipHash_Update((_c), 2, 4, (_p), (_l))
+#define SipHash24_End(_d) SipHash_End((_d), 2, 4)
+#define SipHash24_Final(_d, _c) SipHash_Final((_d), (_c), 2, 4)
+#define SipHash24(_k, _p, _l) SipHash((_k), 2, 4, (_p), (_l))
+
+#define SipHash48_Init(_c, _k) SipHash_Init((_c), (_k))
+#define SipHash48_Update(_c, _p, _l) SipHash_Update((_c), 4, 8, (_p), (_l))
+#define SipHash48_End(_d) SipHash_End((_d), 4, 8)
+#define SipHash48_Final(_d, _c) SipHash_Final((_d), (_c), 4, 8)
+#define SipHash48(_k, _p, _l) SipHash((_k), 4, 8, (_p), (_l))
+
+#endif /* _SIPHASH_H_ */
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
new file mode 100644
index 000000000000..b684b9f00c1b
--- /dev/null
+++ b/fs/bcachefs/six.c
@@ -0,0 +1,913 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/export.h>
+#include <linux/log2.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/task.h>
+#include <linux/slab.h>
+
+#include "six.h"
+
+#ifdef DEBUG
+#define EBUG_ON(cond) BUG_ON(cond)
+#else
+#define EBUG_ON(cond) do {} while (0)
+#endif
+
+#define six_acquire(l, t, r, ip) lock_acquire(l, 0, t, r, 1, NULL, ip)
+#define six_release(l, ip) lock_release(l, ip)
+
+static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
+
+#define SIX_LOCK_HELD_read_OFFSET 0
+#define SIX_LOCK_HELD_read ~(~0U << 26)
+#define SIX_LOCK_HELD_intent (1U << 26)
+#define SIX_LOCK_HELD_write (1U << 27)
+#define SIX_LOCK_WAITING_read (1U << (28 + SIX_LOCK_read))
+#define SIX_LOCK_WAITING_write (1U << (28 + SIX_LOCK_write))
+#define SIX_LOCK_NOSPIN (1U << 31)
+
+struct six_lock_vals {
+ /* Value we add to the lock in order to take the lock: */
+ u32 lock_val;
+
+ /* If the lock has this value (used as a mask), taking the lock fails: */
+ u32 lock_fail;
+
+ /* Mask that indicates lock is held for this type: */
+ u32 held_mask;
+
+ /* Waitlist we wakeup when releasing the lock: */
+ enum six_lock_type unlock_wakeup;
+};
+
+static const struct six_lock_vals l[] = {
+ [SIX_LOCK_read] = {
+ .lock_val = 1U << SIX_LOCK_HELD_read_OFFSET,
+ .lock_fail = SIX_LOCK_HELD_write,
+ .held_mask = SIX_LOCK_HELD_read,
+ .unlock_wakeup = SIX_LOCK_write,
+ },
+ [SIX_LOCK_intent] = {
+ .lock_val = SIX_LOCK_HELD_intent,
+ .lock_fail = SIX_LOCK_HELD_intent,
+ .held_mask = SIX_LOCK_HELD_intent,
+ .unlock_wakeup = SIX_LOCK_intent,
+ },
+ [SIX_LOCK_write] = {
+ .lock_val = SIX_LOCK_HELD_write,
+ .lock_fail = SIX_LOCK_HELD_read,
+ .held_mask = SIX_LOCK_HELD_write,
+ .unlock_wakeup = SIX_LOCK_read,
+ },
+};
+
+static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
+{
+ if ((atomic_read(&lock->state) & mask) != mask)
+ atomic_or(mask, &lock->state);
+}
+
+static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
+{
+ if (atomic_read(&lock->state) & mask)
+ atomic_and(~mask, &lock->state);
+}
+
+static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
+ u32 old, struct task_struct *owner)
+{
+ if (type != SIX_LOCK_intent)
+ return;
+
+ if (!(old & SIX_LOCK_HELD_intent)) {
+ EBUG_ON(lock->owner);
+ lock->owner = owner;
+ } else {
+ EBUG_ON(lock->owner != current);
+ }
+}
+
+static inline unsigned pcpu_read_count(struct six_lock *lock)
+{
+ unsigned read_count = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ read_count += *per_cpu_ptr(lock->readers, cpu);
+ return read_count;
+}
+
+/*
+ * __do_six_trylock() - main trylock routine
+ *
+ * Returns 1 on success, 0 on failure
+ *
+ * In percpu reader mode, a failed trylock may cause a spurious trylock failure
+ * for another thread taking the competing lock type, and we may have to do a
+ * wakeup: when a wakeup is required, we return -1 - wakeup_type.
+ */
+static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
+ struct task_struct *task, bool try)
+{
+ int ret;
+ u32 old;
+
+ EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
+ EBUG_ON(type == SIX_LOCK_write &&
+ (try != !(atomic_read(&lock->state) & SIX_LOCK_HELD_write)));
+
+ /*
+ * Percpu reader mode:
+ *
+ * The basic idea behind this algorithm is that you can implement a lock
+ * between two threads without any atomics, just memory barriers:
+ *
+ * For two threads you'll need two variables, one variable for "thread a
+ * has the lock" and another for "thread b has the lock".
+ *
+ * To take the lock, a thread sets its variable indicating that it holds
+ * the lock, then issues a full memory barrier, then reads from the
+ * other thread's variable to check if the other thread thinks it has
+ * the lock. If we raced, we backoff and retry/sleep.
+ *
+ * Failure to take the lock may cause a spurious trylock failure in
+ * another thread, because we temporarily set the lock to indicate that
+	 * we held it. This would be a problem for a thread in six_lock(), when
+	 * it calls trylock after adding itself to the waitlist and prior to
+	 * sleeping.
+ *
+ * Therefore, if we fail to get the lock, and there were waiters of the
+ * type we conflict with, we will have to issue a wakeup.
+ *
+ * Since we may be called under wait_lock (and by the wakeup code
+ * itself), we return that the wakeup has to be done instead of doing it
+ * here.
+ */
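+	/*
+	 * Sketch of the two-thread scheme described above (illustrative
+	 * pseudocode only, not part of the implementation):
+	 *
+	 *	thread A:		thread B:
+	 *	a_held = true;		b_held = true;
+	 *	smp_mb();		smp_mb();
+	 *	if (!b_held)		if (!a_held)
+	 *		lock acquired;		lock acquired;
+	 */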
+ if (type == SIX_LOCK_read && lock->readers) {
+ preempt_disable();
+ this_cpu_inc(*lock->readers); /* signal that we own lock */
+
+ smp_mb();
+
+ old = atomic_read(&lock->state);
+ ret = !(old & l[type].lock_fail);
+
+ this_cpu_sub(*lock->readers, !ret);
+ preempt_enable();
+
+ if (!ret && (old & SIX_LOCK_WAITING_write))
+ ret = -1 - SIX_LOCK_write;
+ } else if (type == SIX_LOCK_write && lock->readers) {
+ if (try) {
+ atomic_add(SIX_LOCK_HELD_write, &lock->state);
+ smp_mb__after_atomic();
+ }
+
+ ret = !pcpu_read_count(lock);
+
+ if (try && !ret) {
+ old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state);
+ if (old & SIX_LOCK_WAITING_read)
+ ret = -1 - SIX_LOCK_read;
+ }
+ } else {
+ old = atomic_read(&lock->state);
+ do {
+ ret = !(old & l[type].lock_fail);
+ if (!ret || (type == SIX_LOCK_write && !try)) {
+ smp_mb();
+ break;
+ }
+ } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, old + l[type].lock_val));
+
+ EBUG_ON(ret && !(atomic_read(&lock->state) & l[type].held_mask));
+ }
+
+ if (ret > 0)
+ six_set_owner(lock, type, old, task);
+
+ EBUG_ON(type == SIX_LOCK_write && try && ret <= 0 &&
+ (atomic_read(&lock->state) & SIX_LOCK_HELD_write));
+
+ return ret;
+}
+
+static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
+{
+ struct six_lock_waiter *w, *next;
+ struct task_struct *task;
+ bool saw_one;
+ int ret;
+again:
+ ret = 0;
+ saw_one = false;
+ raw_spin_lock(&lock->wait_lock);
+
+ list_for_each_entry_safe(w, next, &lock->wait_list, list) {
+ if (w->lock_want != lock_type)
+ continue;
+
+ if (saw_one && lock_type != SIX_LOCK_read)
+ goto unlock;
+ saw_one = true;
+
+ ret = __do_six_trylock(lock, lock_type, w->task, false);
+ if (ret <= 0)
+ goto unlock;
+
+ /*
+ * Similar to percpu_rwsem_wake_function(), we need to guard
+ * against the wakee noticing w->lock_acquired, returning, and
+ * then exiting before we do the wakeup:
+ */
+ task = get_task_struct(w->task);
+ __list_del(w->list.prev, w->list.next);
+ /*
+ * The release barrier here ensures the ordering of the
+ * __list_del before setting w->lock_acquired; @w is on the
+ * stack of the thread doing the waiting and will be reused
+ * after it sees w->lock_acquired with no other locking:
+ * pairs with smp_load_acquire() in six_lock_slowpath()
+ */
+ smp_store_release(&w->lock_acquired, true);
+ wake_up_process(task);
+ put_task_struct(task);
+ }
+
+ six_clear_bitmask(lock, SIX_LOCK_WAITING_read << lock_type);
+unlock:
+ raw_spin_unlock(&lock->wait_lock);
+
+ if (ret < 0) {
+ lock_type = -ret - 1;
+ goto again;
+ }
+}
+
+__always_inline
+static void six_lock_wakeup(struct six_lock *lock, u32 state,
+ enum six_lock_type lock_type)
+{
+ if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))
+ return;
+
+ if (!(state & (SIX_LOCK_WAITING_read << lock_type)))
+ return;
+
+ __six_lock_wakeup(lock, lock_type);
+}
+
+__always_inline
+static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
+{
+ int ret;
+
+ ret = __do_six_trylock(lock, type, current, try);
+ if (ret < 0)
+ __six_lock_wakeup(lock, -ret - 1);
+
+ return ret > 0;
+}
+
+/**
+ * six_trylock_ip - attempt to take a six lock without blocking
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
+ *
+ * Return: true on success, false on failure.
+ */
+bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
+{
+ if (!do_six_trylock(lock, type, true))
+ return false;
+
+ if (type != SIX_LOCK_write)
+ six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
+ return true;
+}
+EXPORT_SYMBOL_GPL(six_trylock_ip);
+
+/**
+ * six_relock_ip - attempt to re-take a lock that was held previously
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @seq: lock sequence number obtained from six_lock_seq() while lock was
+ * held previously
+ * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
+ *
+ * Return: true on success, false on failure.
+ */
+bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
+ unsigned seq, unsigned long ip)
+{
+ if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))
+ return false;
+
+ if (six_lock_seq(lock) != seq) {
+ six_unlock_ip(lock, type, ip);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(six_relock_ip);
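+
+/*
+ * Illustrative sketch of the unlock/relock pattern (struct foo and
+ * do_blocking_work() are hypothetical):
+ *
+ *	six_lock_read(&foo->lock, NULL, NULL);
+ *	u32 seq = six_lock_seq(&foo->lock);
+ *	six_unlock_read(&foo->lock);
+ *
+ *	do_blocking_work();
+ *
+ *	if (!six_relock_read(&foo->lock, seq)) {
+ *		six_lock_read(&foo->lock, NULL, NULL);
+ *		... revalidate state protected by foo->lock ...
+ *	}
+ */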
+
+#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER
+
+static inline bool six_can_spin_on_owner(struct six_lock *lock)
+{
+ struct task_struct *owner;
+ bool ret;
+
+ if (need_resched())
+ return false;
+
+ rcu_read_lock();
+ owner = READ_ONCE(lock->owner);
+ ret = !owner || owner_on_cpu(owner);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool six_spin_on_owner(struct six_lock *lock,
+ struct task_struct *owner,
+ u64 end_time)
+{
+ bool ret = true;
+ unsigned loop = 0;
+
+ rcu_read_lock();
+ while (lock->owner == owner) {
+ /*
+		 * Ensure we emit the owner->on_cpu dereference _after_
+ * checking lock->owner still matches owner. If that fails,
+ * owner might point to freed memory. If it still matches,
+ * the rcu_read_lock() ensures the memory stays valid.
+ */
+ barrier();
+
+ if (!owner_on_cpu(owner) || need_resched()) {
+ ret = false;
+ break;
+ }
+
+ if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
+ six_set_bitmask(lock, SIX_LOCK_NOSPIN);
+ ret = false;
+ break;
+ }
+
+ cpu_relax();
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
+{
+ struct task_struct *task = current;
+ u64 end_time;
+
+ if (type == SIX_LOCK_write)
+ return false;
+
+ preempt_disable();
+ if (!six_can_spin_on_owner(lock))
+ goto fail;
+
+ if (!osq_lock(&lock->osq))
+ goto fail;
+
+ end_time = sched_clock() + 10 * NSEC_PER_USEC;
+
+ while (1) {
+ struct task_struct *owner;
+
+ /*
+ * If there's an owner, wait for it to either
+ * release the lock or go to sleep.
+ */
+ owner = READ_ONCE(lock->owner);
+ if (owner && !six_spin_on_owner(lock, owner, end_time))
+ break;
+
+ if (do_six_trylock(lock, type, false)) {
+ osq_unlock(&lock->osq);
+ preempt_enable();
+ return true;
+ }
+
+ /*
+ * When there's no owner, we might have preempted between the
+ * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task, that will live-lock because we won't let
+ * the owner complete.
+ */
+ if (!owner && (need_resched() || rt_task(task)))
+ break;
+
+ /*
+ * The cpu_relax() call is a compiler barrier which forces
+ * everything in this loop to be re-loaded. We don't need
+ * memory barriers as we'll eventually observe the right
+ * values at the cost of a few extra spins.
+ */
+ cpu_relax();
+ }
+
+ osq_unlock(&lock->osq);
+fail:
+ preempt_enable();
+
+ /*
+ * If we fell out of the spin path because of need_resched(),
+ * reschedule now, before we try-lock again. This avoids getting
+ * scheduled out right after we obtained the lock.
+ */
+ if (need_resched())
+ schedule();
+
+ return false;
+}
+
+#else /* CONFIG_SIX_LOCK_SPIN_ON_OWNER */
+
+static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
+{
+ return false;
+}
+
+#endif
+
+noinline
+static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
+ struct six_lock_waiter *wait,
+ six_lock_should_sleep_fn should_sleep_fn, void *p,
+ unsigned long ip)
+{
+ int ret = 0;
+
+ if (type == SIX_LOCK_write) {
+ EBUG_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
+ atomic_add(SIX_LOCK_HELD_write, &lock->state);
+ smp_mb__after_atomic();
+ }
+
+ if (six_optimistic_spin(lock, type))
+ goto out;
+
+ lock_contended(&lock->dep_map, ip);
+
+ wait->task = current;
+ wait->lock_want = type;
+ wait->lock_acquired = false;
+
+ raw_spin_lock(&lock->wait_lock);
+ six_set_bitmask(lock, SIX_LOCK_WAITING_read << type);
+ /*
+ * Retry taking the lock after taking waitlist lock, in case we raced
+ * with an unlock:
+ */
+ ret = __do_six_trylock(lock, type, current, false);
+ if (ret <= 0) {
+ wait->start_time = local_clock();
+
+ if (!list_empty(&lock->wait_list)) {
+ struct six_lock_waiter *last =
+ list_last_entry(&lock->wait_list,
+ struct six_lock_waiter, list);
+
+ if (time_before_eq64(wait->start_time, last->start_time))
+ wait->start_time = last->start_time + 1;
+ }
+
+ list_add_tail(&wait->list, &lock->wait_list);
+ }
+ raw_spin_unlock(&lock->wait_lock);
+
+ if (unlikely(ret > 0)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (unlikely(ret < 0)) {
+ __six_lock_wakeup(lock, -ret - 1);
+ ret = 0;
+ }
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ /*
+ * Ensures that writes to the waitlist entry happen after we see
+ * wait->lock_acquired: pairs with the smp_store_release in
+ * __six_lock_wakeup
+ */
+ if (smp_load_acquire(&wait->lock_acquired))
+ break;
+
+ ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
+ if (unlikely(ret)) {
+ bool acquired;
+
+ /*
+ * If should_sleep_fn() returns an error, we are
+ * required to return that error even if we already
+ * acquired the lock - should_sleep_fn() might have
+ * modified external state (e.g. when the deadlock cycle
+ * detector in bcachefs issued a transaction restart)
+ */
+ raw_spin_lock(&lock->wait_lock);
+ acquired = wait->lock_acquired;
+ if (!acquired)
+ list_del(&wait->list);
+ raw_spin_unlock(&lock->wait_lock);
+
+ if (unlikely(acquired))
+ do_six_unlock_type(lock, type);
+ break;
+ }
+
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+out:
+ if (ret && type == SIX_LOCK_write) {
+ six_clear_bitmask(lock, SIX_LOCK_HELD_write);
+ six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
+ }
+
+ return ret;
+}
+
+/**
+ * six_lock_ip_waiter - take a lock, with full waitlist interface
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @wait: pointer to wait object, which will be added to lock's waitlist
+ * @should_sleep_fn: callback run after adding to waitlist, immediately prior
+ * to scheduling
+ * @p: passed through to @should_sleep_fn
+ * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
+ *
+ * This is the most general six_lock() variant, with parameters to support full
+ * cycle detection for deadlock avoidance.
+ *
+ * The code calling this function must implement tracking of held locks, and the
+ * @wait object should be embedded into the struct that tracks held locks -
+ * which must also be accessible in a thread-safe way.
+ *
+ * @should_sleep_fn should invoke the cycle detector; it should walk each
+ * lock's waiters, and for each waiter recursively walk their held locks.
+ *
+ * When this function must block, @wait will be added to @lock's waitlist before
+ * calling trylock, and before calling @should_sleep_fn, and @wait will not be
+ * removed from the lock waitlist until the lock has been successfully acquired,
+ * or we abort.
+ *
+ * @wait.start_time will be monotonically increasing for any given waitlist, and
+ * thus may be used as a loop cursor.
+ *
+ * Return: 0 on success, or the return code from @should_sleep_fn on failure.
+ */
+int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
+ struct six_lock_waiter *wait,
+ six_lock_should_sleep_fn should_sleep_fn, void *p,
+ unsigned long ip)
+{
+ int ret;
+
+ wait->start_time = 0;
+
+ if (type != SIX_LOCK_write)
+ six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip);
+
+ ret = do_six_trylock(lock, type, true) ? 0
+ : six_lock_slowpath(lock, type, wait, should_sleep_fn, p, ip);
+
+ if (ret && type != SIX_LOCK_write)
+ six_release(&lock->dep_map, ip);
+ if (!ret)
+ lock_acquired(&lock->dep_map, ip);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
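+
+/*
+ * Sketch of how an upper layer might use the waitlist interface for cycle
+ * detection (struct my_trans, my_held_locks and my_check_for_cycle are
+ * hypothetical, not part of this API):
+ *
+ *	struct my_trans {
+ *		struct six_lock_waiter	wait;
+ *		struct my_held_locks	held;
+ *	};
+ *
+ *	static int my_should_sleep_fn(struct six_lock *lock, void *p)
+ *	{
+ *		struct my_trans *trans = p;
+ *
+ *		return my_check_for_cycle(trans, lock) ? -EDEADLK : 0;
+ *	}
+ *
+ *	ret = six_lock_ip_waiter(&node->lock, SIX_LOCK_intent, &trans->wait,
+ *				 my_should_sleep_fn, trans, _THIS_IP_);
+ */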
+
+__always_inline
+static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
+{
+ u32 state;
+
+ if (type == SIX_LOCK_intent)
+ lock->owner = NULL;
+
+ if (type == SIX_LOCK_read &&
+ lock->readers) {
+ smp_mb(); /* unlock barrier */
+ this_cpu_dec(*lock->readers);
+ smp_mb(); /* between unlocking and checking for waiters */
+ state = atomic_read(&lock->state);
+ } else {
+ u32 v = l[type].lock_val;
+
+ if (type != SIX_LOCK_read)
+ v += atomic_read(&lock->state) & SIX_LOCK_NOSPIN;
+
+ EBUG_ON(!(atomic_read(&lock->state) & l[type].held_mask));
+ state = atomic_sub_return_release(v, &lock->state);
+ }
+
+ six_lock_wakeup(lock, state, l[type].unlock_wakeup);
+}
+
+/**
+ * six_unlock_ip - drop a six lock
+ * @lock: lock to unlock
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
+ *
+ * When a lock is held multiple times (because six_lock_increment() was used),
+ * this decrements the 'lock held' counter by one.
+ *
+ * For example:
+ * six_lock_read(&foo->lock); read count 1
+ * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
+ * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 1
+ * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 0
+ */
+void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
+{
+ EBUG_ON(type == SIX_LOCK_write &&
+ !(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
+ EBUG_ON((type == SIX_LOCK_write ||
+ type == SIX_LOCK_intent) &&
+ lock->owner != current);
+
+ if (type != SIX_LOCK_write)
+ six_release(&lock->dep_map, ip);
+ else
+ lock->seq++;
+
+ if (type == SIX_LOCK_intent &&
+ lock->intent_lock_recurse) {
+ --lock->intent_lock_recurse;
+ return;
+ }
+
+ do_six_unlock_type(lock, type);
+}
+EXPORT_SYMBOL_GPL(six_unlock_ip);
+
+/**
+ * six_lock_downgrade - convert an intent lock to a read lock
+ * @lock: lock to downgrade
+ *
+ * @lock will have read count incremented and intent count decremented
+ */
+void six_lock_downgrade(struct six_lock *lock)
+{
+ six_lock_increment(lock, SIX_LOCK_read);
+ six_unlock_intent(lock);
+}
+EXPORT_SYMBOL_GPL(six_lock_downgrade);
+
+/**
+ * six_lock_tryupgrade - attempt to convert read lock to an intent lock
+ * @lock: lock to upgrade
+ *
+ * On success, @lock will have intent count incremented and read count
+ * decremented
+ *
+ * Return: true on success, false on failure
+ */
+bool six_lock_tryupgrade(struct six_lock *lock)
+{
+ u32 old = atomic_read(&lock->state), new;
+
+ do {
+ new = old;
+
+ if (new & SIX_LOCK_HELD_intent)
+ return false;
+
+ if (!lock->readers) {
+ EBUG_ON(!(new & SIX_LOCK_HELD_read));
+ new -= l[SIX_LOCK_read].lock_val;
+ }
+
+ new |= SIX_LOCK_HELD_intent;
+ } while (!atomic_try_cmpxchg_acquire(&lock->state, &old, new));
+
+ if (lock->readers)
+ this_cpu_dec(*lock->readers);
+
+ six_set_owner(lock, SIX_LOCK_intent, old, current);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(six_lock_tryupgrade);
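+
+/*
+ * A typical upgrade pattern, as a sketch (struct foo is hypothetical): if
+ * tryupgrade fails, another thread holds an intent lock, so drop the read
+ * lock, take the lock in intent mode, and use the sequence number to detect
+ * whether revalidation is needed:
+ *
+ *	if (!six_lock_tryupgrade(&foo->lock)) {
+ *		u32 seq = six_lock_seq(&foo->lock);
+ *
+ *		six_unlock_read(&foo->lock);
+ *		six_lock_intent(&foo->lock, NULL, NULL);
+ *
+ *		if (six_lock_seq(&foo->lock) != seq)
+ *			... revalidate state protected by foo->lock ...
+ *	}
+ */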
+
+/**
+ * six_trylock_convert - attempt to convert a held lock from one type to another
+ * @lock: lock to upgrade
+ * @from: SIX_LOCK_read or SIX_LOCK_intent
+ * @to: SIX_LOCK_read or SIX_LOCK_intent
+ *
+ * On success, the hold count for @to will have been incremented and the count
+ * for @from decremented
+ *
+ * Return: true on success, false on failure
+ */
+bool six_trylock_convert(struct six_lock *lock,
+ enum six_lock_type from,
+ enum six_lock_type to)
+{
+ EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);
+
+ if (to == from)
+ return true;
+
+ if (to == SIX_LOCK_read) {
+ six_lock_downgrade(lock);
+ return true;
+ } else {
+ return six_lock_tryupgrade(lock);
+ }
+}
+EXPORT_SYMBOL_GPL(six_trylock_convert);
+
+/**
+ * six_lock_increment - increase held lock count on a lock that is already held
+ * @lock: lock to increment
+ * @type: SIX_LOCK_read or SIX_LOCK_intent
+ *
+ * @lock must already be held, with a lock type that is greater than or equal to
+ * @type
+ *
+ * A corresponding six_unlock_type() call will be required for @lock to be fully
+ * unlocked.
+ */
+void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
+{
+ six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
+
+ /* XXX: assert already locked, and that we don't overflow: */
+
+ switch (type) {
+ case SIX_LOCK_read:
+ if (lock->readers) {
+ this_cpu_inc(*lock->readers);
+ } else {
+ EBUG_ON(!(atomic_read(&lock->state) &
+ (SIX_LOCK_HELD_read|
+ SIX_LOCK_HELD_intent)));
+ atomic_add(l[type].lock_val, &lock->state);
+ }
+ break;
+ case SIX_LOCK_intent:
+ EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent));
+ lock->intent_lock_recurse++;
+ break;
+ case SIX_LOCK_write:
+ BUG();
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(six_lock_increment);
+
+/**
+ * six_lock_wakeup_all - wake up all waiters on @lock
+ * @lock: lock to wake up waiters for
+ *
+ * Waking up waiters will cause them to re-run should_sleep_fn, which may then
+ * abort the lock operation.
+ *
+ * This function is never needed in a bug-free program; it's only useful in
+ * debug code, e.g. to determine if a cycle detector is at fault.
+ */
+void six_lock_wakeup_all(struct six_lock *lock)
+{
+ u32 state = atomic_read(&lock->state);
+ struct six_lock_waiter *w;
+
+ six_lock_wakeup(lock, state, SIX_LOCK_read);
+ six_lock_wakeup(lock, state, SIX_LOCK_intent);
+ six_lock_wakeup(lock, state, SIX_LOCK_write);
+
+ raw_spin_lock(&lock->wait_lock);
+ list_for_each_entry(w, &lock->wait_list, list)
+ wake_up_process(w->task);
+ raw_spin_unlock(&lock->wait_lock);
+}
+EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
+
+/**
+ * six_lock_counts - return held lock counts, for each lock type
+ * @lock: lock to return counters for
+ *
+ * Return: the number of times a lock is held for read, intent and write.
+ */
+struct six_lock_count six_lock_counts(struct six_lock *lock)
+{
+ struct six_lock_count ret;
+
+ ret.n[SIX_LOCK_read] = !lock->readers
+ ? atomic_read(&lock->state) & SIX_LOCK_HELD_read
+ : pcpu_read_count(lock);
+ ret.n[SIX_LOCK_intent] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent) +
+ lock->intent_lock_recurse;
+ ret.n[SIX_LOCK_write] = !!(atomic_read(&lock->state) & SIX_LOCK_HELD_write);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(six_lock_counts);
+
+/**
+ * six_lock_readers_add - directly manipulate reader count of a lock
+ * @lock: lock to add/subtract readers for
+ * @nr: reader count to add/subtract
+ *
+ * When an upper layer is implementing lock reentrancy, we may have both read
+ * and intent locks on the same lock.
+ *
+ * When we need to take a write lock, the read locks will cause self-deadlock,
+ * because six locks themselves do not track which read locks are held by the
+ * current thread and which are held by a different thread - they do no
+ * per-thread tracking of held locks.
+ *
+ * The upper layer that is tracking held locks may, however, if trylock() has
+ * failed, count up its own read locks, subtract them, take the write lock, and
+ * then re-add them.
+ *
+ * As in any other situation when taking a write lock, @lock must be held for
+ * intent one (or more) times, so @lock will never be left unlocked.
+ */
+void six_lock_readers_add(struct six_lock *lock, int nr)
+{
+ if (lock->readers) {
+ this_cpu_add(*lock->readers, nr);
+ } else {
+ EBUG_ON((int) (atomic_read(&lock->state) & SIX_LOCK_HELD_read) + nr < 0);
+ /* reader count starts at bit 0 */
+ atomic_add(nr, &lock->state);
+ }
+}
+EXPORT_SYMBOL_GPL(six_lock_readers_add);
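+
+/*
+ * Sketch of the pattern described above (foo and nr_my_read_locks() are
+ * hypothetical; the caller holds an intent lock plus read locks that it is
+ * tracking itself):
+ *
+ *	unsigned nr = nr_my_read_locks(foo);
+ *
+ *	six_lock_readers_add(&foo->lock, -(int) nr);
+ *	six_lock_write(&foo->lock, NULL, NULL);
+ *	... do the update ...
+ *	six_unlock_write(&foo->lock);
+ *	six_lock_readers_add(&foo->lock, nr);
+ */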
+
+/**
+ * six_lock_exit - release resources held by a lock prior to freeing
+ * @lock: lock to exit
+ *
+ * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
+ * required to free the percpu read counts.
+ */
+void six_lock_exit(struct six_lock *lock)
+{
+ WARN_ON(lock->readers && pcpu_read_count(lock));
+ WARN_ON(atomic_read(&lock->state) & SIX_LOCK_HELD_read);
+
+ free_percpu(lock->readers);
+ lock->readers = NULL;
+}
+EXPORT_SYMBOL_GPL(six_lock_exit);
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+ struct lock_class_key *key, enum six_lock_init_flags flags)
+{
+ atomic_set(&lock->state, 0);
+ raw_spin_lock_init(&lock->wait_lock);
+ INIT_LIST_HEAD(&lock->wait_list);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ debug_check_no_locks_freed((void *) lock, sizeof(*lock));
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+
+ /*
+ * Don't assume that we have real percpu variables available in
+ * userspace:
+ */
+#ifdef __KERNEL__
+ if (flags & SIX_LOCK_INIT_PCPU) {
+ /*
+ * We don't return an error here on memory allocation failure
+ * since percpu is an optimization, and locks will work with the
+ * same semantics in non-percpu mode: callers can check for
+ * failure if they wish by checking lock->readers, but generally
+ * will not want to treat it as an error.
+ */
+ lock->readers = alloc_percpu(unsigned);
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(__six_lock_init);
diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h
new file mode 100644
index 000000000000..4c268b0b8316
--- /dev/null
+++ b/fs/bcachefs/six.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_SIX_H
+#define _LINUX_SIX_H
+
+/**
+ * DOC: SIX locks overview
+ *
+ * Shared/intent/exclusive locks: sleepable read/write locks, like rw semaphores
+ * but with an additional state: read/shared, intent, exclusive/write
+ *
+ * The purpose of the intent state is to allow for greater concurrency on tree
+ * structures without deadlocking. In general, a read can't be upgraded to a
+ * write lock without deadlocking, so an operation that updates multiple nodes
+ * will have to take write locks for the full duration of the operation.
+ *
+ * But by adding an intent state, which is exclusive with other intent locks but
+ * not with readers, we can take intent locks at the start of the operation,
+ * and then take write locks only for the actual update to each individual
+ * node, without deadlocking.
+ *
+ * Example usage:
+ * six_lock_read(&foo->lock);
+ * six_unlock_read(&foo->lock);
+ *
+ * An intent lock must be held before taking a write lock:
+ * six_lock_intent(&foo->lock);
+ * six_lock_write(&foo->lock);
+ * six_unlock_write(&foo->lock);
+ * six_unlock_intent(&foo->lock);
+ *
+ * Other operations:
+ * six_trylock_read()
+ * six_trylock_intent()
+ * six_trylock_write()
+ *
+ * six_lock_downgrade()	converts from intent to read
+ * six_lock_tryupgrade()	attempts to convert from read to intent, may fail
+ *
+ * There are also interfaces that take the lock type as an enum:
+ *
+ * six_lock_type(&foo->lock, SIX_LOCK_read);
+ * six_trylock_convert(&foo->lock, SIX_LOCK_read, SIX_LOCK_intent)
+ * six_lock_type(&foo->lock, SIX_LOCK_write);
+ * six_unlock_type(&foo->lock, SIX_LOCK_write);
+ * six_unlock_type(&foo->lock, SIX_LOCK_intent);
+ *
+ * Lock sequence numbers - unlock(), relock():
+ *
+ * Locks embed sequence numbers, which are incremented on write lock/unlock.
+ * This allows locks to be dropped and then retaken iff the state they protect
+ * hasn't changed; this makes it much easier to avoid holding locks while e.g.
+ * doing IO or allocating memory.
+ *
+ * Example usage:
+ * six_lock_read(&foo->lock);
+ * u32 seq = six_lock_seq(&foo->lock);
+ * six_unlock_read(&foo->lock);
+ *
+ * some_operation_that_may_block();
+ *
+ * if (six_relock_read(&foo->lock, seq)) { ... }
+ *
+ * If the relock operation succeeds, it is as if the lock was never unlocked.
+ *
+ * Reentrancy:
+ *
+ * Six locks are not by themselves reentrant, but have counters for both the
+ * read and intent states that can be used to provide reentrancy by an upper
+ * layer that tracks held locks. If a lock is known to already be held in the
+ * read or intent state, six_lock_increment() can be used to bump the "lock
+ * held in this state" counter, increasing the number of unlock calls that
+ * will be required to fully unlock it.
+ *
+ * Example usage:
+ * six_lock_read(&foo->lock);
+ * six_lock_increment(&foo->lock, SIX_LOCK_read);
+ * six_unlock_read(&foo->lock);
+ * six_unlock_read(&foo->lock);
+ * foo->lock is now fully unlocked.
+ *
+ * Since the intent state supersedes read, it's legal to increment the read
+ * counter when holding an intent lock, but not the reverse.
+ *
+ * A lock may only be held once for write: six_lock_increment(.., SIX_LOCK_write)
+ * is not legal.
+ *
+ * should_sleep_fn:
+ *
+ * There is a six_lock() variant that takes a function pointer that is called
+ * immediately prior to schedule() when blocking, and may return an error to
+ * abort.
+ *
+ * One possible use for this feature is when objects being locked are part of
+ * a cache and may be reused, and lock ordering is based on a property of the
+ * object that will change when the object is reused - i.e. logical key order.
+ *
+ * If looking up an object in the cache may race with object reuse, and lock
+ * ordering is required to prevent deadlock, object reuse may change the
+ * correct lock order for that object and cause a deadlock. should_sleep_fn
+ * can be used to check if the object is still the object we want and avoid
+ * this deadlock.
+ *
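+ * One way should_sleep_fn might check for object reuse, as a sketch (all
+ * names here are hypothetical):
+ *
+ *	static int obj_still_wanted(struct six_lock *lock, void *p)
+ *	{
+ *		struct my_lookup *l = p;
+ *		struct my_obj *obj = container_of(lock, struct my_obj, lock);
+ *
+ *		return obj->key == l->key ? 0 : -EAGAIN;
+ *	}
+ *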
+ * Wait list entry interface:
+ *
+ * There is a six_lock() variant, six_lock_waiter(), that takes a pointer to a
+ * wait list entry. By embedding six_lock_waiter into another object, and by
+ * traversing lock waitlists, it is then possible for an upper layer to
+ * implement full cycle detection for deadlock avoidance.
+ *
+ * should_sleep_fn should be used for invoking the cycle detector, walking the
+ * graph of held locks to check for a deadlock. The upper layer must track
+ * held locks for each thread, and each thread's held locks must be reachable
+ * from its six_lock_waiter object.
+ *
+ * six_lock_waiter() will add the wait object to the waitlist before re-trying
+ * the trylock and before calling should_sleep_fn, and the wait object will not
+ * be removed from the waitlist until either the lock has been successfully
+ * acquired, or we aborted because should_sleep_fn returned an error.
+ *
+ * Also, six_lock_waiter contains a timestamp, and waiters on a waitlist will
+ * have timestamps in strictly ascending order - this is so the timestamp can
+ * be used as a cursor for lock graph traversal.
+ */
+
+#include <linux/lockdep.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
+
+enum six_lock_type {
+ SIX_LOCK_read,
+ SIX_LOCK_intent,
+ SIX_LOCK_write,
+};
+
+struct six_lock {
+ atomic_t state;
+ u32 seq;
+ unsigned intent_lock_recurse;
+ struct task_struct *owner;
+ unsigned __percpu *readers;
+#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq;
+#endif
+ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+struct six_lock_waiter {
+ struct list_head list;
+ struct task_struct *task;
+ enum six_lock_type lock_want;
+ bool lock_acquired;
+ u64 start_time;
+};
+
+typedef int (*six_lock_should_sleep_fn)(struct six_lock *lock, void *);
+
+void six_lock_exit(struct six_lock *lock);
+
+enum six_lock_init_flags {
+ SIX_LOCK_INIT_PCPU = 1U << 0,
+};
+
+void __six_lock_init(struct six_lock *lock, const char *name,
+ struct lock_class_key *key, enum six_lock_init_flags flags);
+
+/**
+ * six_lock_init - initialize a six lock
+ * @lock: lock to initialize
+ * @flags: optional flags, i.e. SIX_LOCK_INIT_PCPU
+ */
+#define six_lock_init(lock, flags) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __six_lock_init((lock), #lock, &__key, flags); \
+} while (0)
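+
+/*
+ * Example, as a sketch (struct foo is hypothetical):
+ *
+ *	struct foo {
+ *		struct six_lock	lock;
+ *	};
+ *
+ *	six_lock_init(&foo->lock, 0);
+ *
+ * or, to get percpu reader counts (an optimization for read-heavy locks):
+ *
+ *	six_lock_init(&foo->lock, SIX_LOCK_INIT_PCPU);
+ */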
+
+/**
+ * six_lock_seq - obtain current lock sequence number
+ * @lock: six_lock to obtain sequence number for
+ *
+ * @lock should be held for read or intent, and not write
+ *
+ * By saving the lock sequence number, we can unlock @lock and then (typically
+ * after some blocking operation) attempt to relock it: the relock will succeed
+ * if the sequence number hasn't changed, meaning no write locks have been taken
+ * and state corresponding to what @lock protects is still valid.
+ */
+static inline u32 six_lock_seq(const struct six_lock *lock)
+{
+ return lock->seq;
+}
+
+bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip);
+
+/**
+ * six_trylock_type - attempt to take a six lock without blocking
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ *
+ * Return: true on success, false on failure.
+ */
+static inline bool six_trylock_type(struct six_lock *lock, enum six_lock_type type)
+{
+ return six_trylock_ip(lock, type, _THIS_IP_);
+}
+
+int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
+ struct six_lock_waiter *wait,
+ six_lock_should_sleep_fn should_sleep_fn, void *p,
+ unsigned long ip);
+
+/**
+ * six_lock_waiter - take a lock, with full waitlist interface
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @wait: pointer to wait object, which will be added to lock's waitlist
+ * @should_sleep_fn: callback run after adding to waitlist, immediately prior
+ * to scheduling
+ * @p: passed through to @should_sleep_fn
+ *
+ * This is a convenience wrapper around six_lock_ip_waiter(), see that function
+ * for full documentation.
+ *
+ * Return: 0 on success, or the return code from @should_sleep_fn on failure.
+ */
+static inline int six_lock_waiter(struct six_lock *lock, enum six_lock_type type,
+ struct six_lock_waiter *wait,
+ six_lock_should_sleep_fn should_sleep_fn, void *p)
+{
+ return six_lock_ip_waiter(lock, type, wait, should_sleep_fn, p, _THIS_IP_);
+}
+
+/**
+ * six_lock_ip - take a six lock
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @should_sleep_fn: callback run after adding to waitlist, immediately prior
+ * to scheduling
+ * @p: passed through to @should_sleep_fn
+ * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
+ *
+ * Return: 0 on success, or the return code from @should_sleep_fn on failure.
+ */
+static inline int six_lock_ip(struct six_lock *lock, enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p,
+ unsigned long ip)
+{
+ struct six_lock_waiter wait;
+
+ return six_lock_ip_waiter(lock, type, &wait, should_sleep_fn, p, ip);
+}
+
+/**
+ * six_lock_type - take a six lock
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @should_sleep_fn: callback run after adding to waitlist, immediately prior
+ * to scheduling
+ * @p: passed through to @should_sleep_fn
+ *
+ * Return: 0 on success, or the return code from @should_sleep_fn on failure.
+ */
+static inline int six_lock_type(struct six_lock *lock, enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p)
+{
+ struct six_lock_waiter wait;
+
+ return six_lock_ip_waiter(lock, type, &wait, should_sleep_fn, p, _THIS_IP_);
+}
+
+bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
+ unsigned seq, unsigned long ip);
+
+/**
+ * six_relock_type - attempt to re-take a lock that was held previously
+ * @lock: lock to take
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ * @seq: lock sequence number obtained from six_lock_seq() while lock was
+ * held previously
+ *
+ * Return: true on success, false on failure.
+ */
+static inline bool six_relock_type(struct six_lock *lock, enum six_lock_type type,
+ unsigned seq)
+{
+ return six_relock_ip(lock, type, seq, _THIS_IP_);
+}
+
+void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip);
+
+/**
+ * six_unlock_type - drop a six lock
+ * @lock: lock to unlock
+ * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
+ *
+ * When a lock is held multiple times (because six_lock_increment() was used),
+ * this decrements the 'lock held' counter by one.
+ *
+ * For example:
+ * six_lock_read(&foo->lock); read count 1
+ * six_lock_increment(&foo->lock, SIX_LOCK_read); read count 2
+ * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 1
+ * six_unlock_type(&foo->lock, SIX_LOCK_read); read count 0
+ */
+static inline void six_unlock_type(struct six_lock *lock, enum six_lock_type type)
+{
+ six_unlock_ip(lock, type, _THIS_IP_);
+}
+
+#define __SIX_LOCK(type) \
+static inline bool six_trylock_ip_##type(struct six_lock *lock, unsigned long ip)\
+{ \
+ return six_trylock_ip(lock, SIX_LOCK_##type, ip); \
+} \
+ \
+static inline bool six_trylock_##type(struct six_lock *lock) \
+{ \
+ return six_trylock_ip(lock, SIX_LOCK_##type, _THIS_IP_); \
+} \
+ \
+static inline int six_lock_ip_waiter_##type(struct six_lock *lock, \
+ struct six_lock_waiter *wait, \
+ six_lock_should_sleep_fn should_sleep_fn, void *p,\
+ unsigned long ip) \
+{ \
+ return six_lock_ip_waiter(lock, SIX_LOCK_##type, wait, should_sleep_fn, p, ip);\
+} \
+ \
+static inline int six_lock_ip_##type(struct six_lock *lock, \
+ six_lock_should_sleep_fn should_sleep_fn, void *p, \
+ unsigned long ip) \
+{ \
+ return six_lock_ip(lock, SIX_LOCK_##type, should_sleep_fn, p, ip);\
+} \
+ \
+static inline bool six_relock_ip_##type(struct six_lock *lock, u32 seq, unsigned long ip)\
+{ \
+ return six_relock_ip(lock, SIX_LOCK_##type, seq, ip); \
+} \
+ \
+static inline bool six_relock_##type(struct six_lock *lock, u32 seq) \
+{ \
+ return six_relock_ip(lock, SIX_LOCK_##type, seq, _THIS_IP_); \
+} \
+ \
+static inline int six_lock_##type(struct six_lock *lock, \
+ six_lock_should_sleep_fn fn, void *p)\
+{ \
+ return six_lock_ip_##type(lock, fn, p, _THIS_IP_); \
+} \
+ \
+static inline void six_unlock_ip_##type(struct six_lock *lock, unsigned long ip) \
+{ \
+ six_unlock_ip(lock, SIX_LOCK_##type, ip); \
+} \
+ \
+static inline void six_unlock_##type(struct six_lock *lock) \
+{ \
+ six_unlock_ip(lock, SIX_LOCK_##type, _THIS_IP_); \
+}
+
+__SIX_LOCK(read)
+__SIX_LOCK(intent)
+__SIX_LOCK(write)
+#undef __SIX_LOCK
+
+void six_lock_downgrade(struct six_lock *);
+bool six_lock_tryupgrade(struct six_lock *);
+bool six_trylock_convert(struct six_lock *, enum six_lock_type,
+ enum six_lock_type);
+
+void six_lock_increment(struct six_lock *, enum six_lock_type);
+
+void six_lock_wakeup_all(struct six_lock *);
+
+struct six_lock_count {
+ unsigned n[3];
+};
+
+struct six_lock_count six_lock_counts(struct six_lock *);
+void six_lock_readers_add(struct six_lock *, int);
+
+#endif /* _LINUX_SIX_H */
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
new file mode 100644
index 000000000000..4982468bfe11
--- /dev/null
+++ b/fs/bcachefs/snapshot.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "bkey_buf.h"
+#include "btree_key_cache.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "errcode.h"
+#include "error.h"
+#include "fs.h"
+#include "snapshot.h"
+
+#include <linux/random.h>
+
+/*
+ * Snapshot trees:
+ *
+ * Keys in BTREE_ID_snapshot_trees identify a whole tree of snapshot nodes; they
+ * exist to provide a stable identifier for the whole lifetime of a snapshot
+ * tree.
+ */
+
+void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_snapshot_tree t = bkey_s_c_to_snapshot_tree(k);
+
+ prt_printf(out, "subvol %u root snapshot %u",
+ le32_to_cpu(t.v->master_subvol),
+ le32_to_cpu(t.v->root_snapshot));
+}
+
+int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+ bkey_lt(k.k->p, POS(0, 1))) {
+ prt_printf(err, "bad pos");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
+ struct bch_snapshot_tree *s)
+{
+ int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
+ BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
+
+ if (bch2_err_matches(ret, ENOENT))
+ ret = -BCH_ERR_ENOENT_snapshot_tree;
+ return ret;
+}
+
+struct bkey_i_snapshot_tree *
+__bch2_snapshot_tree_create(struct btree_trans *trans)
+{
+ struct btree_iter iter;
+ int ret = bch2_bkey_get_empty_slot(trans, &iter,
+ BTREE_ID_snapshot_trees, POS(0, U32_MAX));
+ struct bkey_i_snapshot_tree *s_t;
+
+ if (ret == -BCH_ERR_ENOSPC_btree_slot)
+ ret = -BCH_ERR_ENOSPC_snapshot_tree;
+ if (ret)
+ return ERR_PTR(ret);
+
+ s_t = bch2_bkey_alloc(trans, &iter, 0, snapshot_tree);
+ ret = PTR_ERR_OR_ZERO(s_t);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret ? ERR_PTR(ret) : s_t;
+}
+
+static int bch2_snapshot_tree_create(struct btree_trans *trans,
+ u32 root_id, u32 subvol_id, u32 *tree_id)
+{
+ struct bkey_i_snapshot_tree *n_tree =
+ __bch2_snapshot_tree_create(trans);
+
+ if (IS_ERR(n_tree))
+ return PTR_ERR(n_tree);
+
+ n_tree->v.master_subvol = cpu_to_le32(subvol_id);
+ n_tree->v.root_snapshot = cpu_to_le32(root_id);
+ *tree_id = n_tree->k.p.offset;
+ return 0;
+}
+
+/* Snapshot nodes: */
+
+static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
+{
+ struct snapshot_table *t;
+
+ rcu_read_lock();
+ t = rcu_dereference(c->snapshots);
+
+ while (id && id < ancestor)
+ id = __snapshot_t(t, id)->parent;
+ rcu_read_unlock();
+
+ return id == ancestor;
+}
+
+static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
+{
+ const struct snapshot_t *s = __snapshot_t(t, id);
+
+ if (s->skip[2] <= ancestor)
+ return s->skip[2];
+ if (s->skip[1] <= ancestor)
+ return s->skip[1];
+ if (s->skip[0] <= ancestor)
+ return s->skip[0];
+ return s->parent;
+}
+
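+/*
+ * Check whether @ancestor is an ancestor of @id: walk towards @ancestor via
+ * the skiplist entries until we're within IS_ANCESTOR_BITMAP IDs of it, then
+ * do a single is_ancestor bitmap test. Since skiplist entries point at
+ * randomly chosen ancestors, the walk is expected to take roughly
+ * logarithmically many steps.
+ */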
+bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
+{
+ struct snapshot_table *t;
+ bool ret;
+
+ EBUG_ON(c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_snapshots);
+
+ rcu_read_lock();
+ t = rcu_dereference(c->snapshots);
+
+ while (id && id < ancestor - IS_ANCESTOR_BITMAP)
+ id = get_ancestor_below(t, id, ancestor);
+
+ if (id && id < ancestor) {
+ ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
+
+ EBUG_ON(ret != bch2_snapshot_is_ancestor_early(c, id, ancestor));
+ } else {
+ ret = id == ancestor;
+ }
+
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
+{
+ size_t idx = U32_MAX - id;
+ size_t new_size;
+ struct snapshot_table *new, *old;
+
+ new_size = max(16UL, roundup_pow_of_two(idx + 1));
+
+ new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ old = rcu_dereference_protected(c->snapshots, true);
+ if (old)
+		memcpy(new->s, old->s,
+		       sizeof(new->s[0]) * c->snapshot_table_size);
+
+ rcu_assign_pointer(c->snapshots, new);
+ c->snapshot_table_size = new_size;
+ kvfree_rcu_mightsleep(old);
+
+ return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+}
+
+static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
+{
+ size_t idx = U32_MAX - id;
+
+ lockdep_assert_held(&c->snapshot_table_lock);
+
+ if (likely(idx < c->snapshot_table_size))
+ return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+
+ return __snapshot_t_mut(c, id);
+}
+
+void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
+
+ prt_printf(out, "is_subvol %llu deleted %llu parent %10u children %10u %10u subvol %u tree %u",
+ BCH_SNAPSHOT_SUBVOL(s.v),
+ BCH_SNAPSHOT_DELETED(s.v),
+ le32_to_cpu(s.v->parent),
+ le32_to_cpu(s.v->children[0]),
+ le32_to_cpu(s.v->children[1]),
+ le32_to_cpu(s.v->subvol),
+ le32_to_cpu(s.v->tree));
+
+ if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
+ prt_printf(out, " depth %u skiplist %u %u %u",
+ le32_to_cpu(s.v->depth),
+ le32_to_cpu(s.v->skip[0]),
+ le32_to_cpu(s.v->skip[1]),
+ le32_to_cpu(s.v->skip[2]));
+}
+
+int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ struct bkey_s_c_snapshot s;
+ u32 i, id;
+
+ if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+ bkey_lt(k.k->p, POS(0, 1))) {
+ prt_printf(err, "bad pos");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ s = bkey_s_c_to_snapshot(k);
+
+ id = le32_to_cpu(s.v->parent);
+ if (id && id <= k.k->p.offset) {
+ prt_printf(err, "bad parent node (%u <= %llu)",
+ id, k.k->p.offset);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
+ prt_printf(err, "children not normalized");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (s.v->children[0] &&
+ s.v->children[0] == s.v->children[1]) {
+ prt_printf(err, "duplicate child nodes");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ for (i = 0; i < 2; i++) {
+ id = le32_to_cpu(s.v->children[i]);
+
+ if (id >= k.k->p.offset) {
+ prt_printf(err, "bad child node (%u >= %llu)",
+ id, k.k->p.offset);
+ return -BCH_ERR_invalid_bkey;
+ }
+ }
+
+ if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
+ if (le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
+ le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2])) {
+ prt_printf(err, "skiplist not normalized");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
+ id = le32_to_cpu(s.v->skip[i]);
+
+ if ((id && !s.v->parent) ||
+ (id && id <= k.k->p.offset)) {
+ prt_printf(err, "bad skiplist node %u", id);
+ return -BCH_ERR_invalid_bkey;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
+{
+ struct snapshot_t *t = snapshot_t_mut(c, id);
+ u32 parent = id;
+
+ while ((parent = bch2_snapshot_parent_early(c, parent)) &&
+ parent - id - 1 < IS_ANCESTOR_BITMAP)
+ __set_bit(parent - id - 1, t->is_ancestor);
+}
+
+static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
+{
+ mutex_lock(&c->snapshot_table_lock);
+ __set_is_ancestor_bitmap(c, id);
+ mutex_unlock(&c->snapshot_table_lock);
+}
+
+int bch2_mark_snapshot(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c old, struct bkey_s_c new,
+ unsigned flags)
+{
+ struct bch_fs *c = trans->c;
+ struct snapshot_t *t;
+ u32 id = new.k->p.offset;
+ int ret = 0;
+
+ mutex_lock(&c->snapshot_table_lock);
+
+ t = snapshot_t_mut(c, id);
+ if (!t) {
+ ret = -BCH_ERR_ENOMEM_mark_snapshot;
+ goto err;
+ }
+
+ if (new.k->type == KEY_TYPE_snapshot) {
+ struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
+
+ t->parent = le32_to_cpu(s.v->parent);
+ t->children[0] = le32_to_cpu(s.v->children[0]);
+ t->children[1] = le32_to_cpu(s.v->children[1]);
+ t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
+ t->tree = le32_to_cpu(s.v->tree);
+
+ if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
+ t->depth = le32_to_cpu(s.v->depth);
+ t->skip[0] = le32_to_cpu(s.v->skip[0]);
+ t->skip[1] = le32_to_cpu(s.v->skip[1]);
+ t->skip[2] = le32_to_cpu(s.v->skip[2]);
+ } else {
+ t->depth = 0;
+ t->skip[0] = 0;
+ t->skip[1] = 0;
+ t->skip[2] = 0;
+ }
+
+ __set_is_ancestor_bitmap(c, id);
+
+ if (BCH_SNAPSHOT_DELETED(s.v)) {
+ set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_delete_dead_snapshots);
+ }
+ } else {
+ memset(t, 0, sizeof(*t));
+ }
+err:
+ mutex_unlock(&c->snapshot_table_lock);
+ return ret;
+}
+
+int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
+ struct bch_snapshot *s)
+{
+ return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
+ BTREE_ITER_WITH_UPDATES, snapshot, s);
+}
+
+static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
+{
+ struct bch_snapshot v;
+ int ret;
+
+ if (!id)
+ return 0;
+
+ ret = bch2_snapshot_lookup(trans, id, &v);
+ if (bch2_err_matches(ret, ENOENT))
+ bch_err(trans->c, "snapshot node %u not found", id);
+ if (ret)
+ return ret;
+
+ return !BCH_SNAPSHOT_DELETED(&v);
+}
+
+/*
+ * If @k is a snapshot with just one live child, it's part of a linear chain,
+ * which we consider to be an equivalence class: after snapshot deletion
+ * cleanup, there should be only a single key at a given position in this
+ * equivalence class.
+ *
+ * This sets the equivalence class of @k to be the child's equivalence class, if
+ * it's part of such a linear chain: this correctly sets equivalence classes on
+ * startup if we run leaf to root (i.e. in natural key order).
+ */
+static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ unsigned i, nr_live = 0, live_idx = 0;
+ struct bkey_s_c_snapshot snap;
+ u32 id = k.k->p.offset, child[2];
+
+ if (k.k->type != KEY_TYPE_snapshot)
+ return 0;
+
+ snap = bkey_s_c_to_snapshot(k);
+
+ child[0] = le32_to_cpu(snap.v->children[0]);
+ child[1] = le32_to_cpu(snap.v->children[1]);
+
+ for (i = 0; i < 2; i++) {
+ int ret = bch2_snapshot_live(trans, child[i]);
+
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ live_idx = i;
+ nr_live += ret;
+ }
+
+ mutex_lock(&c->snapshot_table_lock);
+
+ snapshot_t_mut(c, id)->equiv = nr_live == 1
+ ? snapshot_t_mut(c, child[live_idx])->equiv
+ : id;
+
+ mutex_unlock(&c->snapshot_table_lock);
+
+ return 0;
+}
+
+/* fsck: */
+
+static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
+{
+ return snapshot_t(c, id)->children[child];
+}
+
+static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
+{
+ return bch2_snapshot_child(c, id, 0);
+}
+
+static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
+{
+ return bch2_snapshot_child(c, id, 1);
+}
+
+static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
+{
+ u32 n, parent;
+
+ n = bch2_snapshot_left_child(c, id);
+ if (n)
+ return n;
+
+ while ((parent = bch2_snapshot_parent(c, id))) {
+ n = bch2_snapshot_right_child(c, parent);
+ if (n && n != id)
+ return n;
+ id = parent;
+ }
+
+ return 0;
+}
+
+static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
+{
+ u32 id = snapshot_root;
+ u32 subvol = 0, s;
+
+ while (id) {
+ s = snapshot_t(c, id)->subvol;
+
+ if (s && (!subvol || s < subvol))
+ subvol = s;
+
+ id = bch2_snapshot_tree_next(c, id);
+ }
+
+ return subvol;
+}
+
+static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
+ u32 snapshot_root, u32 *subvol_id)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_subvolume s;
+ bool found = false;
+ int ret;
+
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN,
+ 0, k, ret) {
+ if (k.k->type != KEY_TYPE_subvolume)
+ continue;
+
+ s = bkey_s_c_to_subvolume(k);
+ if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
+ continue;
+ if (!BCH_SUBVOLUME_SNAP(s.v)) {
+ *subvol_id = s.k->p.offset;
+ found = true;
+ break;
+ }
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (!ret && !found) {
+ struct bkey_i_subvolume *u;
+
+ *subvol_id = bch2_snapshot_tree_oldest_subvol(c, snapshot_root);
+
+ u = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_subvolumes, POS(0, *subvol_id),
+ 0, subvolume);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ return ret;
+
+ SET_BCH_SUBVOLUME_SNAP(&u->v, false);
+ }
+
+ return ret;
+}
+
+static int check_snapshot_tree(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c_snapshot_tree st;
+ struct bch_snapshot s;
+ struct bch_subvolume subvol;
+ struct printbuf buf = PRINTBUF;
+ u32 root_id;
+ int ret;
+
+ if (k.k->type != KEY_TYPE_snapshot_tree)
+ return 0;
+
+ st = bkey_s_c_to_snapshot_tree(k);
+ root_id = le32_to_cpu(st.v->root_snapshot);
+
+ ret = bch2_snapshot_lookup(trans, root_id, &s);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ goto err;
+
+ if (fsck_err_on(ret ||
+ root_id != bch2_snapshot_root(c, root_id) ||
+ st.k->p.offset != le32_to_cpu(s.tree),
+ c,
+ "snapshot tree points to missing/incorrect snapshot:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter, 0);
+ goto err;
+ }
+
+ ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol),
+ false, 0, &subvol);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ goto err;
+
+ if (fsck_err_on(ret, c,
+ "snapshot tree points to missing subvolume:\n %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
+ fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
+ le32_to_cpu(subvol.snapshot),
+ root_id), c,
+ "snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
+ fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
+ "snapshot tree points to snapshot subvolume:\n %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
+ struct bkey_i_snapshot_tree *u;
+ u32 subvol_id;
+
+ ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
+ if (ret)
+ goto err;
+
+ u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto err;
+
+ u->v.master_subvol = cpu_to_le32(subvol_id);
+ st = snapshot_tree_i_to_s_c(u);
+ }
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+/*
+ * For each snapshot_tree, make sure it points to the root of a snapshot tree
+ * and that the snapshot entry points back to it, or delete it.
+ *
+ * And, make sure it points to a subvolume within that snapshot tree, or correct
+ * it to point to the oldest subvolume within that snapshot tree.
+ */
+int bch2_check_snapshot_trees(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_snapshot_trees, POS_MIN,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_snapshot_tree(trans, &iter, k)));
+
+ if (ret)
+ bch_err(c, "error %i checking snapshot trees", ret);
+ return ret;
+}
+
+/*
+ * Look up snapshot tree for @tree_id and find root,
+ * make sure @snap_id is a descendant:
+ */
+static int snapshot_tree_ptr_good(struct btree_trans *trans,
+ u32 snap_id, u32 tree_id)
+{
+ struct bch_snapshot_tree s_t;
+ int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
+
+ if (bch2_err_matches(ret, ENOENT))
+ return 0;
+ if (ret)
+ return ret;
+
+ return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
+}
+
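+/*
+ * Pick an ancestor to point a new node's skiplist entry at: choosing the nth
+ * parent with n uniformly random in [0, depth) is what gives ancestor lookups
+ * their expected logarithmic behaviour.
+ */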
+u32 bch2_snapshot_skiplist_get(struct bch_fs *c, u32 id)
+{
+ const struct snapshot_t *s;
+
+ if (!id)
+ return 0;
+
+ rcu_read_lock();
+ s = snapshot_t(c, id);
+ if (s->parent)
+ id = bch2_snapshot_nth_parent(c, id, get_random_u32_below(s->depth));
+ rcu_read_unlock();
+
+ return id;
+}
+
+static int snapshot_skiplist_good(struct btree_trans *trans, u32 id, struct bch_snapshot s)
+{
+ unsigned i;
+
+ for (i = 0; i < 3; i++)
+ if (!s.parent) {
+ if (s.skip[i])
+ return false;
+ } else {
+ if (!bch2_snapshot_is_ancestor_early(trans->c, id, le32_to_cpu(s.skip[i])))
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * snapshot_tree pointer was incorrect: look up root snapshot node, make sure
+ * its snapshot_tree pointer is correct (allocate new one if necessary), then
+ * update this node's pointer to root node's pointer:
+ */
+static int snapshot_tree_ptr_repair(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct bch_snapshot *s)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter root_iter;
+ struct bch_snapshot_tree s_t;
+ struct bkey_s_c_snapshot root;
+ struct bkey_i_snapshot *u;
+ u32 root_id = bch2_snapshot_root(c, k.k->p.offset), tree_id;
+ int ret;
+
+ root = bch2_bkey_get_iter_typed(trans, &root_iter,
+ BTREE_ID_snapshots, POS(0, root_id),
+ BTREE_ITER_WITH_UPDATES, snapshot);
+ ret = bkey_err(root);
+ if (ret)
+ goto err;
+
+ tree_id = le32_to_cpu(root.v->tree);
+
+ ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
+ if (ret && !bch2_err_matches(ret, ENOENT))
+ return ret;
+
+ if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
+ u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(u) ?:
+ bch2_snapshot_tree_create(trans, root_id,
+ bch2_snapshot_tree_oldest_subvol(c, root_id),
+ &tree_id);
+ if (ret)
+ goto err;
+
+ u->v.tree = cpu_to_le32(tree_id);
+ if (k.k->p.offset == root_id)
+ *s = u->v;
+ }
+
+ if (k.k->p.offset != root_id) {
+ u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto err;
+
+ u->v.tree = cpu_to_le32(tree_id);
+ *s = u->v;
+ }
+err:
+ bch2_trans_iter_exit(trans, &root_iter);
+ return ret;
+}
+
+static int check_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_snapshot s;
+ struct bch_subvolume subvol;
+ struct bch_snapshot v;
+ struct bkey_i_snapshot *u;
+ u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
+ u32 real_depth;
+ struct printbuf buf = PRINTBUF;
+ bool should_have_subvol;
+ u32 i, id;
+ int ret = 0;
+
+ if (k.k->type != KEY_TYPE_snapshot)
+ return 0;
+
+ memset(&s, 0, sizeof(s));
+ memcpy(&s, k.v, bkey_val_bytes(k.k));
+
+ id = le32_to_cpu(s.parent);
+ if (id) {
+ ret = bch2_snapshot_lookup(trans, id, &v);
+ if (bch2_err_matches(ret, ENOENT))
+ bch_err(c, "snapshot with nonexistent parent:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ if (ret)
+ goto err;
+
+ if (le32_to_cpu(v.children[0]) != k.k->p.offset &&
+ le32_to_cpu(v.children[1]) != k.k->p.offset) {
+ bch_err(c, "snapshot parent %u missing pointer to child %llu",
+ id, k.k->p.offset);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < 2 && s.children[i]; i++) {
+ id = le32_to_cpu(s.children[i]);
+
+ ret = bch2_snapshot_lookup(trans, id, &v);
+ if (bch2_err_matches(ret, ENOENT))
+ bch_err(c, "snapshot node %llu has nonexistent child %u",
+ k.k->p.offset, id);
+ if (ret)
+ goto err;
+
+ if (le32_to_cpu(v.parent) != k.k->p.offset) {
+ bch_err(c, "snapshot child %u has wrong parent (got %u should be %llu)",
+ id, le32_to_cpu(v.parent), k.k->p.offset);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
+ !BCH_SNAPSHOT_DELETED(&s);
+
+ if (should_have_subvol) {
+ id = le32_to_cpu(s.subvol);
+ ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
+ if (bch2_err_matches(ret, ENOENT))
+ bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ if (ret)
+ goto err;
+
+ if (BCH_SNAPSHOT_SUBVOL(&s) != (le32_to_cpu(subvol.snapshot) == k.k->p.offset)) {
+ bch_err(c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
+ k.k->p.offset);
+ ret = -EINVAL;
+ goto err;
+ }
+ } else {
+ if (fsck_err_on(s.subvol, c, "snapshot should not point to subvol:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto err;
+
+ u->v.subvol = 0;
+ s = u->v;
+ }
+ }
+
+ ret = snapshot_tree_ptr_good(trans, k.k->p.offset, le32_to_cpu(s.tree));
+ if (ret < 0)
+ goto err;
+
+ if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
+ if (ret)
+ goto err;
+ }
+ ret = 0;
+
+ real_depth = bch2_snapshot_depth(c, parent_id);
+
+ if (le32_to_cpu(s.depth) != real_depth &&
+ (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
+ fsck_err(c, "snapshot with incorrect depth field, should be %u:\n %s",
+ real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
+ u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto err;
+
+ u->v.depth = cpu_to_le32(real_depth);
+ s = u->v;
+ }
+
+ ret = snapshot_skiplist_good(trans, k.k->p.offset, s);
+ if (ret < 0)
+ goto err;
+
+ if (!ret &&
+ (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
+ fsck_err(c, "snapshot with bad skiplist field:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
+ u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(u);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
+ u->v.skip[i] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent_id));
+
+ bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
+ s = u->v;
+ }
+ ret = 0;
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+int bch2_check_snapshots(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ /*
+ * We iterate backwards as checking/fixing the depth field requires that
+ * the parent's depth already be correct:
+ */
+ ret = bch2_trans_run(c,
+ for_each_btree_key_reverse_commit(trans, iter,
+ BTREE_ID_snapshots, POS_MAX,
+ BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_snapshot(trans, &iter, k)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/*
+ * Mark a snapshot as deleted, for future cleanup:
+ */
+int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
+{
+ struct btree_iter iter;
+ struct bkey_i_snapshot *s;
+ int ret = 0;
+
+ s = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_snapshots, POS(0, id),
+ 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
+ trans->c, "missing snapshot %u", id);
+ return ret;
+ }
+
+ /* already deleted? */
+ if (BCH_SNAPSHOT_DELETED(&s->v))
+ goto err;
+
+ SET_BCH_SNAPSHOT_DELETED(&s->v, true);
+ SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
+ s->v.subvol = 0;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
+{
+ if (le32_to_cpu(s->children[0]) < le32_to_cpu(s->children[1]))
+ swap(s->children[0], s->children[1]);
+}
+
+static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
+ struct btree_iter c_iter = (struct btree_iter) { NULL };
+ struct btree_iter tree_iter = (struct btree_iter) { NULL };
+ struct bkey_s_c_snapshot s;
+ u32 parent_id, child_id;
+ unsigned i;
+ int ret = 0;
+
+ s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
+ BTREE_ITER_INTENT, snapshot);
+ ret = bkey_err(s);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "missing snapshot %u", id);
+
+ if (ret)
+ goto err;
+
+ BUG_ON(s.v->children[1]);
+
+ parent_id = le32_to_cpu(s.v->parent);
+ child_id = le32_to_cpu(s.v->children[0]);
+
+ if (parent_id) {
+ struct bkey_i_snapshot *parent;
+
+ parent = bch2_bkey_get_mut_typed(trans, &p_iter,
+ BTREE_ID_snapshots, POS(0, parent_id),
+ 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(parent);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "missing snapshot %u", parent_id);
+ if (unlikely(ret))
+ goto err;
+
+ /* find entry in parent->children for node being deleted */
+ for (i = 0; i < 2; i++)
+ if (le32_to_cpu(parent->v.children[i]) == id)
+ break;
+
+ if (bch2_fs_inconsistent_on(i == 2, c,
+ "snapshot %u missing child pointer to %u",
+ parent_id, id))
+ goto err;
+
+		parent->v.children[i] = cpu_to_le32(child_id);
+
+ normalize_snapshot_child_pointers(&parent->v);
+ }
+
+ if (child_id) {
+ struct bkey_i_snapshot *child;
+
+ child = bch2_bkey_get_mut_typed(trans, &c_iter,
+ BTREE_ID_snapshots, POS(0, child_id),
+ 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(child);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "missing snapshot %u", child_id);
+ if (unlikely(ret))
+ goto err;
+
+ child->v.parent = cpu_to_le32(parent_id);
+
+ if (!child->v.parent) {
+ child->v.skip[0] = 0;
+ child->v.skip[1] = 0;
+ child->v.skip[2] = 0;
+ }
+ }
+
+ if (!parent_id) {
+ /*
+ * We're deleting the root of a snapshot tree: update the
+ * snapshot_tree entry to point to the new root, or delete it if
+ * this is the last snapshot ID in this tree:
+ */
+ struct bkey_i_snapshot_tree *s_t;
+
+ BUG_ON(s.v->children[1]);
+
+ s_t = bch2_bkey_get_mut_typed(trans, &tree_iter,
+ BTREE_ID_snapshot_trees, POS(0, le32_to_cpu(s.v->tree)),
+ 0, snapshot_tree);
+ ret = PTR_ERR_OR_ZERO(s_t);
+ if (ret)
+ goto err;
+
+ if (s.v->children[0]) {
+ s_t->v.root_snapshot = s.v->children[0];
+ } else {
+ s_t->k.type = KEY_TYPE_deleted;
+ set_bkey_val_u64s(&s_t->k, 0);
+ }
+ }
+
+ ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+ bch2_trans_iter_exit(trans, &tree_iter);
+ bch2_trans_iter_exit(trans, &p_iter);
+ bch2_trans_iter_exit(trans, &c_iter);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
+ u32 *new_snapids,
+ u32 *snapshot_subvols,
+ unsigned nr_snapids)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_i_snapshot *n;
+ struct bkey_s_c k;
+ unsigned i, j;
+ u32 depth = bch2_snapshot_depth(c, parent);
+ int ret;
+
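+	/*
+	 * Snapshot IDs are allocated descending from U32_MAX: find the lowest
+	 * existing key, then step backwards one empty slot per new ID:
+	 */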
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
+ POS_MIN, BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < nr_snapids; i++) {
+ k = bch2_btree_iter_prev_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k || !k.k->p.offset) {
+ ret = -BCH_ERR_ENOSPC_snapshot_create;
+ goto err;
+ }
+
+ n = bch2_bkey_alloc(trans, &iter, 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ goto err;
+
+ n->v.flags = 0;
+ n->v.parent = cpu_to_le32(parent);
+ n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
+ n->v.tree = cpu_to_le32(tree);
+ n->v.depth = cpu_to_le32(depth);
+
+ for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
+ n->v.skip[j] = cpu_to_le32(bch2_snapshot_skiplist_get(c, parent));
+
+ bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
+ SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
+
+ ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
+ bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0);
+ if (ret)
+ goto err;
+
+ new_snapids[i] = iter.pos.offset;
+
+ mutex_lock(&c->snapshot_table_lock);
+ snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i];
+ mutex_unlock(&c->snapshot_table_lock);
+ }
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/*
+ * Create new snapshot IDs as children of an existing snapshot ID:
+ */
+static int bch2_snapshot_node_create_children(struct btree_trans *trans, u32 parent,
+ u32 *new_snapids,
+ u32 *snapshot_subvols,
+ unsigned nr_snapids)
+{
+ struct btree_iter iter;
+ struct bkey_i_snapshot *n_parent;
+ int ret = 0;
+
+ n_parent = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_snapshots, POS(0, parent),
+ 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(n_parent);
+ if (unlikely(ret)) {
+ if (bch2_err_matches(ret, ENOENT))
+ bch_err(trans->c, "snapshot %u not found", parent);
+ return ret;
+ }
+
+ if (n_parent->v.children[0] || n_parent->v.children[1]) {
+ bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = create_snapids(trans, parent, le32_to_cpu(n_parent->v.tree),
+ new_snapids, snapshot_subvols, nr_snapids);
+ if (ret)
+ goto err;
+
+ n_parent->v.children[0] = cpu_to_le32(new_snapids[0]);
+ n_parent->v.children[1] = cpu_to_le32(new_snapids[1]);
+ n_parent->v.subvol = 0;
+ SET_BCH_SNAPSHOT_SUBVOL(&n_parent->v, false);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/*
+ * Create a snapshot node that is the root of a new tree:
+ */
+static int bch2_snapshot_node_create_tree(struct btree_trans *trans,
+ u32 *new_snapids,
+ u32 *snapshot_subvols,
+ unsigned nr_snapids)
+{
+ struct bkey_i_snapshot_tree *n_tree;
+ int ret;
+
+ n_tree = __bch2_snapshot_tree_create(trans);
+ ret = PTR_ERR_OR_ZERO(n_tree) ?:
+ create_snapids(trans, 0, n_tree->k.p.offset,
+ new_snapids, snapshot_subvols, nr_snapids);
+ if (ret)
+ return ret;
+
+ n_tree->v.master_subvol = cpu_to_le32(snapshot_subvols[0]);
+ n_tree->v.root_snapshot = cpu_to_le32(new_snapids[0]);
+ return 0;
+}
+
+int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
+ u32 *new_snapids,
+ u32 *snapshot_subvols,
+ unsigned nr_snapids)
+{
+ BUG_ON((parent == 0) != (nr_snapids == 1));
+ BUG_ON((parent != 0) != (nr_snapids == 2));
+
+ return parent
+ ? bch2_snapshot_node_create_children(trans, parent,
+ new_snapids, snapshot_subvols, nr_snapids)
+ : bch2_snapshot_node_create_tree(trans,
+ new_snapids, snapshot_subvols, nr_snapids);
+}
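+
+/*
+ * Usage sketch (illustrative; parent and subvolume ids are hypothetical):
+ * creating two snapshot nodes as children of an existing snapshot, from
+ * outside a transaction:
+ *
+ *	u32 new_snapids[2], snapshot_subvols[2] = { subvol_a, subvol_b };
+ *
+ *	ret = bch2_trans_run(c,
+ *		commit_do(trans, NULL, NULL, 0,
+ *			  bch2_snapshot_node_create(trans, parent,
+ *					new_snapids, snapshot_subvols, 2)));
+ */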
+
+/*
+ * If we have an unlinked inode in an internal snapshot node, and the inode
+ * really has been deleted in all child snapshots, how does this get cleaned up?
+ *
+ * First there is the problem of how keys that have been overwritten in all
+ * child snapshots get deleted (unimplemented?) - though inodes may perhaps be
+ * special.
+ *
+ * Also: an unlinked inode in an internal snapshot node appears not to get
+ * deleted correctly if the inode doesn't exist in any leaf snapshot.
+ *
+ * Solution: for a key in an interior snapshot node that needs work done on it
+ * requiring it to be mutated, iterate over all descendant leaf nodes and copy
+ * the key to them, where it can be mutated.
+ */
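+/*
+ * For example (ids hypothetical), in the tree
+ *
+ *	  10
+ *	 /  \
+ *	9    8
+ *
+ * a key at snapshot 10 that needs mutating is first copied to the leaf
+ * snapshots 9 and 8, and the mutation is then applied to the copies - see
+ * bch2_propagate_key_to_snapshot_leaves() below.
+ */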
+
+static int snapshot_delete_key(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ snapshot_id_list *deleted,
+ snapshot_id_list *equiv_seen,
+ struct bpos *last_pos)
+{
+ struct bch_fs *c = trans->c;
+ u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+
+ if (!bkey_eq(k.k->p, *last_pos))
+ equiv_seen->nr = 0;
+ *last_pos = k.k->p;
+
+ if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
+ snapshot_list_has_id(equiv_seen, equiv)) {
+ return bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ } else {
+ return snapshot_list_add(c, equiv_seen, equiv);
+ }
+}
+
+static int move_key_to_correct_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+
+ /*
+ * When we have a linear chain of snapshot nodes, we consider
+ * those to form an equivalence class: we're going to collapse
+ * them all down to a single node, and keep the leaf-most node -
+ * which has the same id as the equivalence class id.
+ *
+ * If there are multiple keys in different snapshots at the same
+ * position, we're only going to keep the one in the newest
+ * snapshot - the rest have been overwritten and are redundant,
+ * and for the key we're going to keep we need to move it to the
+ * equivalence class ID if it's not there already.
+ */
+ if (equiv != k.k->p.snapshot) {
+ struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
+ struct btree_iter new_iter;
+ int ret;
+
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ return ret;
+
+ new->k.p.snapshot = equiv;
+
+ bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
+ BTREE_ITER_ALL_SNAPSHOTS|
+ BTREE_ITER_CACHED|
+ BTREE_ITER_INTENT);
+
+ ret = bch2_btree_iter_traverse(&new_iter) ?:
+ bch2_trans_update(trans, &new_iter, new,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_trans_iter_exit(trans, &new_iter);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * For a given snapshot, if it doesn't have a subvolume that points to it, and
+ * it doesn't have child snapshot nodes - it's now redundant and we can mark it
+ * as deleted.
+ */
+static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_snapshot snap;
+ u32 children[2];
+ int ret;
+
+ if (k.k->type != KEY_TYPE_snapshot)
+ return 0;
+
+ snap = bkey_s_c_to_snapshot(k);
+ if (BCH_SNAPSHOT_DELETED(snap.v) ||
+ BCH_SNAPSHOT_SUBVOL(snap.v))
+ return 0;
+
+ children[0] = le32_to_cpu(snap.v->children[0]);
+ children[1] = le32_to_cpu(snap.v->children[1]);
+
+ ret = bch2_snapshot_live(trans, children[0]) ?:
+ bch2_snapshot_live(trans, children[1]);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
+ return 0;
+}
+
+static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
+ snapshot_id_list *skip)
+{
+ rcu_read_lock();
+ while (snapshot_list_has_id(skip, id))
+ id = __bch2_snapshot_parent(c, id);
+
+ while (n--) {
+ do {
+ id = __bch2_snapshot_parent(c, id);
+ } while (snapshot_list_has_id(skip, id));
+ }
+ rcu_read_unlock();
+
+ return id;
+}
+
+static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter, struct bkey_s_c k,
+ snapshot_id_list *deleted)
+{
+ struct bch_fs *c = trans->c;
+ u32 nr_deleted_ancestors = 0;
+ struct bkey_i_snapshot *s;
+ u32 *i;
+ int ret;
+
+ if (k.k->type != KEY_TYPE_snapshot)
+ return 0;
+
+ if (snapshot_list_has_id(deleted, k.k->p.offset))
+ return 0;
+
+ s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (ret)
+ return ret;
+
+ darray_for_each(*deleted, i)
+ nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i);
+
+ if (!nr_deleted_ancestors)
+ return 0;
+
+ le32_add_cpu(&s->v.depth, -nr_deleted_ancestors);
+
+ if (!s->v.depth) {
+ s->v.skip[0] = 0;
+ s->v.skip[1] = 0;
+ s->v.skip[2] = 0;
+ } else {
+ u32 depth = le32_to_cpu(s->v.depth);
+ u32 parent = bch2_snapshot_parent(c, s->k.p.offset);
+
+ for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) {
+ u32 id = le32_to_cpu(s->v.skip[j]);
+
+ if (snapshot_list_has_id(deleted, id)) {
+ id = depth > 1
+ ? bch2_snapshot_nth_parent_skip(c,
+ parent,
+ get_random_u32_below(depth - 1),
+ deleted)
+ : parent;
+ s->v.skip[j] = cpu_to_le32(id);
+ }
+ }
+
+ bubble_sort(s->v.skip, ARRAY_SIZE(s->v.skip), cmp_le32);
+ }
+
+ return bch2_trans_update(trans, iter, &s->k_i, 0);
+}
+
+int bch2_delete_dead_snapshots(struct bch_fs *c)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_snapshot snap;
+ snapshot_id_list deleted = { 0 };
+ snapshot_id_list deleted_interior = { 0 };
+ u32 *i, id;
+ int ret = 0;
+
+ if (!test_bit(BCH_FS_STARTED, &c->flags)) {
+ ret = bch2_fs_read_write_early(c);
+ if (ret) {
+ bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
+ return ret;
+ }
+ }
+
+ trans = bch2_trans_get(c);
+
+ /*
+ * For every snapshot node: If we have no live children and it's not
+ * pointed to by a subvolume, delete it:
+ */
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
+ POS_MIN, 0, k,
+ NULL, NULL, 0,
+ bch2_delete_redundant_snapshot(trans, &iter, k));
+ if (ret) {
+ bch_err_msg(c, ret, "deleting redundant snapshots");
+ goto err;
+ }
+
+ ret = for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
+ POS_MIN, 0, k,
+ bch2_snapshot_set_equiv(trans, k));
+ if (ret) {
+ bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
+ goto err;
+ }
+
+ for_each_btree_key(trans, iter, BTREE_ID_snapshots,
+ POS_MIN, 0, k, ret) {
+ if (k.k->type != KEY_TYPE_snapshot)
+ continue;
+
+ snap = bkey_s_c_to_snapshot(k);
+ if (BCH_SNAPSHOT_DELETED(snap.v)) {
+ ret = snapshot_list_add(c, &deleted, k.k->p.offset);
+ if (ret)
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret) {
+ bch_err_msg(c, ret, "walking snapshots");
+ goto err;
+ }
+
+ for (id = 0; id < BTREE_ID_NR; id++) {
+ struct bpos last_pos = POS_MIN;
+ snapshot_id_list equiv_seen = { 0 };
+ struct disk_reservation res = { 0 };
+
+ if (!btree_type_has_snapshots(id))
+ continue;
+
+ ret = for_each_btree_key_commit(trans, iter,
+ id, POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ &res, NULL, BTREE_INSERT_NOFAIL,
+ snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
+ for_each_btree_key_commit(trans, iter,
+ id, POS_MIN,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ &res, NULL, BTREE_INSERT_NOFAIL,
+ move_key_to_correct_snapshot(trans, &iter, k));
+
+ bch2_disk_reservation_put(c, &res);
+ darray_exit(&equiv_seen);
+
+ if (ret) {
+ bch_err_msg(c, ret, "deleting keys from dying snapshots");
+ goto err;
+ }
+ }
+
+ down_write(&c->snapshot_create_lock);
+
+ for_each_btree_key(trans, iter, BTREE_ID_snapshots,
+ POS_MIN, 0, k, ret) {
+ u32 snapshot = k.k->p.offset;
+ u32 equiv = bch2_snapshot_equiv(c, snapshot);
+
+ if (equiv != snapshot)
+ snapshot_list_add(c, &deleted_interior, snapshot);
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ if (ret)
+ goto err_create_lock;
+
+ /*
+ * Fixing children of deleted snapshots can't be done completely
+ * atomically, if we crash between here and when we delete the interior
+ * nodes some depth fields will be off:
+ */
+ ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
+ BTREE_ITER_INTENT, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
+ if (ret)
+ goto err_create_lock;
+
+ darray_for_each(deleted, i) {
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_snapshot_node_delete(trans, *i));
+ if (ret) {
+ bch_err_msg(c, ret, "deleting snapshot %u", *i);
+ goto err_create_lock;
+ }
+ }
+
+ darray_for_each(deleted_interior, i) {
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_snapshot_node_delete(trans, *i));
+ if (ret) {
+ bch_err_msg(c, ret, "deleting snapshot %u", *i);
+ goto err_create_lock;
+ }
+ }
+
+ clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
+err_create_lock:
+ up_write(&c->snapshot_create_lock);
+err:
+ darray_exit(&deleted_interior);
+ darray_exit(&deleted);
+ bch2_trans_put(trans);
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+void bch2_delete_dead_snapshots_work(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
+
+ if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
+ bch2_delete_dead_snapshots(c);
+ bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
+}
+
+void bch2_delete_dead_snapshots_async(struct bch_fs *c)
+{
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) &&
+ !queue_work(c->write_ref_wq, &c->snapshot_delete_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
+}
+
+int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
+ struct btree_trans_commit_hook *h)
+{
+ struct bch_fs *c = trans->c;
+
+ set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
+
+ if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_delete_dead_snapshots)
+ return 0;
+
+ bch2_delete_dead_snapshots_async(c);
+ return 0;
+}
+
+int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
+ enum btree_id id,
+ struct bpos pos)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_trans_iter_init(trans, &iter, id, pos,
+ BTREE_ITER_NOT_EXTENTS|
+ BTREE_ITER_ALL_SNAPSHOTS);
+ while (1) {
+ k = bch2_btree_iter_prev(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ break;
+
+ if (!k.k)
+ break;
+
+ if (!bkey_eq(pos, k.k->p))
+ break;
+
+ if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
+ ret = 1;
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+static u32 bch2_snapshot_smallest_child(struct bch_fs *c, u32 id)
+{
+ const struct snapshot_t *s = snapshot_t(c, id);
+
+ return s->children[1] ?: s->children[0];
+}
+
+static u32 bch2_snapshot_smallest_descendent(struct bch_fs *c, u32 id)
+{
+ u32 child;
+
+ while ((child = bch2_snapshot_smallest_child(c, id)))
+ id = child;
+ return id;
+}
+
+static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
+ enum btree_id btree,
+ struct bkey_s_c interior_k,
+ u32 leaf_id, struct bpos *new_min_pos)
+{
+ struct btree_iter iter;
+ struct bpos pos = interior_k.k->p;
+ struct bkey_s_c k;
+ struct bkey_i *new;
+ int ret;
+
+ pos.snapshot = leaf_id;
+
+ bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto out;
+
+ /* key already overwritten in this snapshot? */
+ if (k.k->p.snapshot != interior_k.k->p.snapshot)
+ goto out;
+
+ if (bpos_eq(*new_min_pos, POS_MIN)) {
+ *new_min_pos = k.k->p;
+ new_min_pos->snapshot = leaf_id;
+ }
+
+ new = bch2_bkey_make_mut_noupdate(trans, interior_k);
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ goto out;
+
+ new->k.p.snapshot = leaf_id;
+ ret = bch2_trans_update(trans, &iter, new, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *trans,
+ enum btree_id btree,
+ struct bkey_s_c k,
+ struct bpos *new_min_pos)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_buf sk;
+ u32 restart_count = trans->restart_count;
+ int ret = 0;
+
+ bch2_bkey_buf_init(&sk);
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ k = bkey_i_to_s_c(sk.k);
+
+ *new_min_pos = POS_MIN;
+
+ for (u32 id = bch2_snapshot_smallest_descendent(c, k.k->p.snapshot);
+ id < k.k->p.snapshot;
+ id++) {
+ if (!bch2_snapshot_is_ancestor(c, id, k.k->p.snapshot) ||
+ !bch2_snapshot_is_leaf(c, id))
+ continue;
+again:
+ ret = btree_trans_too_many_iters(trans) ?:
+ bch2_propagate_key_to_snapshot_leaf(trans, btree, k, id, new_min_pos) ?:
+ bch2_trans_commit(trans, NULL, NULL, 0);
+ if (ret && bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+ bch2_trans_begin(trans);
+ goto again;
+ }
+
+ if (ret)
+ break;
+ }
+
+ bch2_bkey_buf_exit(&sk, c);
+
+ return ret ?: trans_was_restarted(trans, restart_count);
+}
+
+int bch2_snapshots_read(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
+ POS_MIN, 0, k,
+ bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
+ bch2_snapshot_set_equiv(trans, k)) ?:
+ for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
+ POS_MIN, 0, k,
+ (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+void bch2_fs_snapshots_exit(struct bch_fs *c)
+{
+ kfree(rcu_dereference_protected(c->snapshots, true));
+}
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
new file mode 100644
index 000000000000..de215d9d1252
--- /dev/null
+++ b/fs/bcachefs/snapshot.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SNAPSHOT_H
+#define _BCACHEFS_SNAPSHOT_H
+
+enum bkey_invalid_flags;
+
+void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+int bch2_snapshot_tree_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+
+#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \
+ .key_invalid = bch2_snapshot_tree_invalid, \
+ .val_to_text = bch2_snapshot_tree_to_text, \
+ .min_val_size = 8, \
+})
+
+struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);
+
+int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);
+
+void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_s_c, unsigned);
+
+#define bch2_bkey_ops_snapshot ((struct bkey_ops) { \
+ .key_invalid = bch2_snapshot_invalid, \
+ .val_to_text = bch2_snapshot_to_text, \
+ .atomic_trigger = bch2_mark_snapshot, \
+ .min_val_size = 24, \
+})
+
+static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
+{
+ return &t->s[U32_MAX - id];
+}
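+
+/*
+ * Snapshot ids are allocated downwards from U32_MAX, so the table is indexed
+ * from the top: id U32_MAX maps to s[0], U32_MAX - 1 to s[1], and so on.
+ */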
+
+static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
+{
+ return __snapshot_t(rcu_dereference(c->snapshots), id);
+}
+
+static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
+{
+ rcu_read_lock();
+ id = snapshot_t(c, id)->tree;
+ rcu_read_unlock();
+
+ return id;
+}
+
+static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
+{
+ return snapshot_t(c, id)->parent;
+}
+
+static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
+{
+ rcu_read_lock();
+ id = __bch2_snapshot_parent_early(c, id);
+ rcu_read_unlock();
+
+ return id;
+}
+
+static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ u32 parent = snapshot_t(c, id)->parent;
+
+ if (parent &&
+ snapshot_t(c, id)->depth != snapshot_t(c, parent)->depth + 1)
+ panic("id %u depth=%u parent %u depth=%u\n",
+ id, snapshot_t(c, id)->depth,
+ parent, snapshot_t(c, parent)->depth);
+
+ return parent;
+#else
+ return snapshot_t(c, id)->parent;
+#endif
+}
+
+static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
+{
+ rcu_read_lock();
+ id = __bch2_snapshot_parent(c, id);
+ rcu_read_unlock();
+
+ return id;
+}
+
+static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
+{
+ rcu_read_lock();
+ while (n--)
+ id = __bch2_snapshot_parent(c, id);
+ rcu_read_unlock();
+
+ return id;
+}
+
+u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);
+
+static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
+{
+ u32 parent;
+
+ rcu_read_lock();
+ while ((parent = __bch2_snapshot_parent(c, id)))
+ id = parent;
+ rcu_read_unlock();
+
+ return id;
+}
+
+static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
+{
+ return snapshot_t(c, id)->equiv;
+}
+
+static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
+{
+ rcu_read_lock();
+ id = __bch2_snapshot_equiv(c, id);
+ rcu_read_unlock();
+
+ return id;
+}
+
+static inline bool bch2_snapshot_is_equiv(struct bch_fs *c, u32 id)
+{
+ return id == bch2_snapshot_equiv(c, id);
+}
+
+static inline bool bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
+{
+ const struct snapshot_t *s;
+ bool ret;
+
+ rcu_read_lock();
+ s = snapshot_t(c, id);
+ ret = s->children[0];
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
+{
+ return !bch2_snapshot_is_internal_node(c, id);
+}
+
+static inline u32 bch2_snapshot_sibling(struct bch_fs *c, u32 id)
+{
+ const struct snapshot_t *s;
+ u32 parent = __bch2_snapshot_parent(c, id);
+
+ if (!parent)
+ return 0;
+
+ s = snapshot_t(c, parent);
+ if (id == s->children[0])
+ return s->children[1];
+ if (id == s->children[1])
+ return s->children[0];
+ return 0;
+}
+
+static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
+{
+ u32 depth;
+
+ rcu_read_lock();
+ depth = parent ? snapshot_t(c, parent)->depth + 1 : 0;
+ rcu_read_unlock();
+
+ return depth;
+}
+
+bool __bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);
+
+static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
+{
+ return id == ancestor
+ ? true
+ : __bch2_snapshot_is_ancestor(c, id, ancestor);
+}
+
+static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)
+{
+ const struct snapshot_t *t;
+ bool ret;
+
+ rcu_read_lock();
+ t = snapshot_t(c, id);
+ ret = (t->children[0]|t->children[1]) != 0;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)
+{
+ u32 *i;
+
+ darray_for_each(*s, i)
+ if (*i == id)
+ return true;
+ return false;
+}
+
+static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list *s, u32 id)
+{
+ u32 *i;
+
+ darray_for_each(*s, i)
+ if (bch2_snapshot_is_ancestor(c, id, *i))
+ return true;
+ return false;
+}
+
+static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 id)
+{
+ int ret;
+
+ BUG_ON(snapshot_list_has_id(s, id));
+ ret = darray_push(s, id);
+ if (ret)
+ bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
+ return ret;
+}
+
+int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
+ struct bch_snapshot *s);
+int bch2_snapshot_get_subvol(struct btree_trans *, u32,
+ struct bch_subvolume *);
+
+int bch2_snapshot_node_create(struct btree_trans *, u32,
+ u32 *, u32 *, unsigned);
+
+int bch2_check_snapshot_trees(struct bch_fs *);
+int bch2_check_snapshots(struct bch_fs *);
+
+int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
+int bch2_delete_dead_snapshots_hook(struct btree_trans *,
+ struct btree_trans_commit_hook *);
+void bch2_delete_dead_snapshots_work(struct work_struct *);
+
+int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);
+
+static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
+ enum btree_id id,
+ struct bpos pos)
+{
+ if (!btree_type_has_snapshots(id) ||
+ bch2_snapshot_is_leaf(trans->c, pos.snapshot))
+ return 0;
+
+ return __bch2_key_has_snapshot_overwrites(trans, id, pos);
+}
+
+int bch2_propagate_key_to_snapshot_leaves(struct btree_trans *, enum btree_id,
+ struct bkey_s_c, struct bpos *);
+
+int bch2_snapshots_read(struct bch_fs *);
+void bch2_fs_snapshots_exit(struct bch_fs *);
+
+#endif /* _BCACHEFS_SNAPSHOT_H */
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
new file mode 100644
index 000000000000..ae21a8cca1b4
--- /dev/null
+++ b/fs/bcachefs/str_hash.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_STR_HASH_H
+#define _BCACHEFS_STR_HASH_H
+
+#include "btree_iter.h"
+#include "btree_update.h"
+#include "checksum.h"
+#include "error.h"
+#include "inode.h"
+#include "siphash.h"
+#include "subvolume.h"
+#include "super.h"
+
+#include <linux/crc32c.h>
+#include <crypto/hash.h>
+#include <crypto/sha2.h>
+
+static inline enum bch_str_hash_type
+bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
+{
+ switch (opt) {
+ case BCH_STR_HASH_OPT_crc32c:
+ return BCH_STR_HASH_crc32c;
+ case BCH_STR_HASH_OPT_crc64:
+ return BCH_STR_HASH_crc64;
+ case BCH_STR_HASH_OPT_siphash:
+ return c->sb.features & (1ULL << BCH_FEATURE_new_siphash)
+ ? BCH_STR_HASH_siphash
+ : BCH_STR_HASH_siphash_old;
+ default:
+ BUG();
+ }
+}
+
+struct bch_hash_info {
+ u8 type;
+ /*
+ * For crc32 or crc64 string hashes the first key value of
+ * the siphash_key (k0) is used as the key.
+ */
+ SIPHASH_KEY siphash_key;
+};
+
+static inline struct bch_hash_info
+bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
+{
+ /* XXX ick */
+ struct bch_hash_info info = {
+ .type = (bi->bi_flags >> INODE_STR_HASH_OFFSET) &
+ ~(~0U << INODE_STR_HASH_BITS),
+ .siphash_key = { .k0 = bi->bi_hash_seed }
+ };
+
+ if (unlikely(info.type == BCH_STR_HASH_siphash_old)) {
+ SHASH_DESC_ON_STACK(desc, c->sha256);
+ u8 digest[SHA256_DIGEST_SIZE];
+
+ desc->tfm = c->sha256;
+
+ crypto_shash_digest(desc, (void *) &bi->bi_hash_seed,
+ sizeof(bi->bi_hash_seed), digest);
+ memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
+ }
+
+ return info;
+}
+
+struct bch_str_hash_ctx {
+ union {
+ u32 crc32c;
+ u64 crc64;
+ SIPHASH_CTX siphash;
+ };
+};
+
+static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
+ const struct bch_hash_info *info)
+{
+ switch (info->type) {
+ case BCH_STR_HASH_crc32c:
+ ctx->crc32c = crc32c(~0, &info->siphash_key.k0,
+ sizeof(info->siphash_key.k0));
+ break;
+ case BCH_STR_HASH_crc64:
+ ctx->crc64 = crc64_be(~0, &info->siphash_key.k0,
+ sizeof(info->siphash_key.k0));
+ break;
+ case BCH_STR_HASH_siphash_old:
+ case BCH_STR_HASH_siphash:
+ SipHash24_Init(&ctx->siphash, &info->siphash_key);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
+ const struct bch_hash_info *info,
+ const void *data, size_t len)
+{
+ switch (info->type) {
+ case BCH_STR_HASH_crc32c:
+ ctx->crc32c = crc32c(ctx->crc32c, data, len);
+ break;
+ case BCH_STR_HASH_crc64:
+ ctx->crc64 = crc64_be(ctx->crc64, data, len);
+ break;
+ case BCH_STR_HASH_siphash_old:
+ case BCH_STR_HASH_siphash:
+ SipHash24_Update(&ctx->siphash, data, len);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
+ const struct bch_hash_info *info)
+{
+ switch (info->type) {
+ case BCH_STR_HASH_crc32c:
+ return ctx->crc32c;
+ case BCH_STR_HASH_crc64:
+ return ctx->crc64 >> 1;
+ case BCH_STR_HASH_siphash_old:
+ case BCH_STR_HASH_siphash:
+ return SipHash24_End(&ctx->siphash) >> 1;
+ default:
+ BUG();
+ }
+}
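+
+/*
+ * Typical usage (sketch; @name and @len stand for whatever the caller is
+ * hashing):
+ *
+ *	struct bch_str_hash_ctx ctx;
+ *
+ *	bch2_str_hash_init(&ctx, info);
+ *	bch2_str_hash_update(&ctx, info, name, len);
+ *	hash = bch2_str_hash_end(&ctx, info);
+ */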
+
+struct bch_hash_desc {
+ enum btree_id btree_id;
+ u8 key_type;
+
+ u64 (*hash_key)(const struct bch_hash_info *, const void *);
+ u64 (*hash_bkey)(const struct bch_hash_info *, struct bkey_s_c);
+ bool (*cmp_key)(struct bkey_s_c, const void *);
+ bool (*cmp_bkey)(struct bkey_s_c, struct bkey_s_c);
+ bool (*is_visible)(subvol_inum inum, struct bkey_s_c);
+};
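+
+/*
+ * Sketch of a descriptor instance (names hypothetical - the real instances,
+ * e.g. for dirents and xattrs, are defined alongside those key types):
+ *
+ *	const struct bch_hash_desc example_hash_desc = {
+ *		.btree_id	= BTREE_ID_dirents,
+ *		.key_type	= KEY_TYPE_dirent,
+ *		.hash_key	= example_hash_key,
+ *		.hash_bkey	= example_hash_bkey,
+ *		.cmp_key	= example_cmp_key,
+ *		.cmp_bkey	= example_cmp_bkey,
+ *	};
+ */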
+
+static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, struct bkey_s_c k)
+{
+ return k.k->type == desc.key_type &&
+ (!desc.is_visible ||
+ !inum.inum ||
+ desc.is_visible(inum, k));
+}
+
+static __always_inline int
+bch2_hash_lookup(struct btree_trans *trans,
+ struct btree_iter *iter,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ subvol_inum inum, const void *key,
+ unsigned flags)
+{
+ struct bkey_s_c k;
+ u32 snapshot;
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ return ret;
+
+ for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
+ SPOS(inum.inum, desc.hash_key(info, key), snapshot),
+ POS(inum.inum, U64_MAX),
+ BTREE_ITER_SLOTS|flags, k, ret) {
+ if (is_visible_key(desc, inum, k)) {
+ if (!desc.cmp_key(k, key))
+ return 0;
+ } else if (k.k->type == KEY_TYPE_hash_whiteout) {
+ ;
+ } else {
+ /* hole, not found */
+ break;
+ }
+ }
+ bch2_trans_iter_exit(trans, iter);
+
+ return ret ?: -BCH_ERR_ENOENT_str_hash_lookup;
+}
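+
+/*
+ * On success the iterator is left positioned at the found key; a caller
+ * typically (sketch) reads it back with:
+ *
+ *	ret = bch2_hash_lookup(trans, &iter, desc, info, inum, key, 0);
+ *	if (!ret)
+ *		k = bch2_btree_iter_peek_slot(&iter);
+ */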
+
+static __always_inline int
+bch2_hash_hole(struct btree_trans *trans,
+ struct btree_iter *iter,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ subvol_inum inum, const void *key)
+{
+ struct bkey_s_c k;
+ u32 snapshot;
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ return ret;
+
+ for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
+ SPOS(inum.inum, desc.hash_key(info, key), snapshot),
+ POS(inum.inum, U64_MAX),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret)
+ if (!is_visible_key(desc, inum, k))
+ return 0;
+ bch2_trans_iter_exit(trans, iter);
+
+ return ret ?: -BCH_ERR_ENOSPC_str_hash_create;
+}
+
+static __always_inline
+int bch2_hash_needs_whiteout(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ struct btree_iter *start)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_trans_copy_iter(&iter, start);
+
+ bch2_btree_iter_advance(&iter);
+
+ for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, k, ret) {
+ if (k.k->type != desc.key_type &&
+ k.k->type != KEY_TYPE_hash_whiteout)
+ break;
+
+ if (k.k->type == desc.key_type &&
+ desc.hash_bkey(info, k) <= start->pos.offset) {
+ ret = 1;
+ break;
+ }
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static __always_inline
+int bch2_hash_set_snapshot(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ subvol_inum inum, u32 snapshot,
+ struct bkey_i *insert,
+ int flags,
+ int update_flags)
+{
+ struct btree_iter iter, slot = { NULL };
+ struct bkey_s_c k;
+ bool found = false;
+ int ret;
+
+ for_each_btree_key_upto_norestart(trans, iter, desc.btree_id,
+ SPOS(insert->k.p.inode,
+ desc.hash_bkey(info, bkey_i_to_s_c(insert)),
+ snapshot),
+ POS(insert->k.p.inode, U64_MAX),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+ if (is_visible_key(desc, inum, k)) {
+ if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
+ goto found;
+
+ /* hash collision: */
+ continue;
+ }
+
+ if (!slot.path &&
+ !(flags & BCH_HASH_SET_MUST_REPLACE))
+ bch2_trans_copy_iter(&slot, &iter);
+
+ if (k.k->type != KEY_TYPE_hash_whiteout)
+ goto not_found;
+ }
+
+ if (!ret)
+ ret = -BCH_ERR_ENOSPC_str_hash_create;
+out:
+ bch2_trans_iter_exit(trans, &slot);
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+found:
+ found = true;
+not_found:
+
+ if (!found && (flags & BCH_HASH_SET_MUST_REPLACE)) {
+ ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
+ } else if (found && (flags & BCH_HASH_SET_MUST_CREATE)) {
+ ret = -EEXIST;
+ } else {
+ if (!found && slot.path)
+ swap(iter, slot);
+
+ insert->k.p = iter.pos;
+ ret = bch2_trans_update(trans, &iter, insert, 0);
+ }
+
+ goto out;
+}
+
+static __always_inline
+int bch2_hash_set(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ subvol_inum inum,
+ struct bkey_i *insert, int flags)
+{
+ u32 snapshot;
+ int ret;
+
+ ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ return ret;
+
+ insert->k.p.inode = inum.inum;
+
+ return bch2_hash_set_snapshot(trans, desc, info, inum,
+ snapshot, insert, flags, 0);
+}
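+
+/*
+ * The hash tables here are open addressed: insertion scans forward from the
+ * key's hash position for a free slot, and deletion leaves a whiteout behind
+ * when a later entry in the probe chain would otherwise become unreachable.
+ * An illustrative insert (names hypothetical):
+ *
+ *	ret = bch2_hash_set(trans, example_hash_desc, info, inum,
+ *			    &new_key->k_i, BCH_HASH_SET_MUST_CREATE);
+ */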
+
+static __always_inline
+int bch2_hash_delete_at(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ struct btree_iter *iter,
+ unsigned update_flags)
+{
+ struct bkey_i *delete;
+ int ret;
+
+ delete = bch2_trans_kmalloc(trans, sizeof(*delete));
+ ret = PTR_ERR_OR_ZERO(delete);
+ if (ret)
+ return ret;
+
+ ret = bch2_hash_needs_whiteout(trans, desc, info, iter);
+ if (ret < 0)
+ return ret;
+
+ bkey_init(&delete->k);
+ delete->k.p = iter->pos;
+ delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;
+
+ return bch2_trans_update(trans, iter, delete, update_flags);
+}
+
+static __always_inline
+int bch2_hash_delete(struct btree_trans *trans,
+ const struct bch_hash_desc desc,
+ const struct bch_hash_info *info,
+ subvol_inum inum, const void *key)
+{
+ struct btree_iter iter;
+ int ret;
+
+ ret = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
+ BTREE_ITER_INTENT);
+ if (ret)
+ return ret;
+
+ ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+#endif /* _BCACHEFS_STR_HASH_H */
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
new file mode 100644
index 000000000000..caf2dd7dafff
--- /dev/null
+++ b/fs/bcachefs/subvolume.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_key_cache.h"
+#include "btree_update.h"
+#include "errcode.h"
+#include "error.h"
+#include "fs.h"
+#include "snapshot.h"
+#include "subvolume.h"
+
+#include <linux/random.h>
+
+static int bch2_subvolume_delete(struct btree_trans *, u32);
+
+static int check_subvol(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c_subvolume subvol;
+ struct bch_snapshot snapshot;
+ unsigned snapid;
+ int ret = 0;
+
+ if (k.k->type != KEY_TYPE_subvolume)
+ return 0;
+
+ subvol = bkey_s_c_to_subvolume(k);
+ snapid = le32_to_cpu(subvol.v->snapshot);
+ ret = bch2_snapshot_lookup(trans, snapid, &snapshot);
+
+ if (bch2_err_matches(ret, ENOENT))
+ bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
+ k.k->p.offset, snapid);
+ if (ret)
+ return ret;
+
+ if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
+ bch2_fs_lazy_rw(c);
+
+ ret = bch2_subvolume_delete(trans, iter->pos.offset);
+ if (ret)
+ bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
+ return ret ?: -BCH_ERR_transaction_restart_nested;
+ }
+
+ if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
+ u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
+ u32 snapshot_tree;
+ struct bch_snapshot_tree st;
+
+ rcu_read_lock();
+ snapshot_tree = snapshot_t(c, snapshot_root)->tree;
+ rcu_read_unlock();
+
+ ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
+
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "%s: snapshot tree %u not found", __func__, snapshot_tree);
+
+ if (ret)
+ return ret;
+
+ if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
+ "subvolume %llu is not set as snapshot but is not master subvolume",
+ k.k->p.offset)) {
+ struct bkey_i_subvolume *s =
+ bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (ret)
+ return ret;
+
+ SET_BCH_SUBVOLUME_SNAP(&s->v, true);
+ }
+ }
+
+fsck_err:
+ return ret;
+}
+
+int bch2_check_subvols(struct bch_fs *c)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_subvol(trans, &iter, k)));
+ if (ret)
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+/* Subvolumes: */
+
+int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags, struct printbuf *err)
+{
+ if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+ bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
+ prt_printf(err, "invalid pos");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
+
+ prt_printf(out, "root %llu snapshot id %u",
+ le64_to_cpu(s.v->inode),
+ le32_to_cpu(s.v->snapshot));
+
+ if (bkey_val_bytes(s.k) > offsetof(struct bch_subvolume, parent))
+ prt_printf(out, " parent %u", le32_to_cpu(s.v->parent));
+}
+
+static __always_inline int
+bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
+ bool inconsistent_if_not_found,
+ int iter_flags,
+ struct bch_subvolume *s)
+{
+ int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol),
+ iter_flags, subvolume, s);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) &&
+ inconsistent_if_not_found,
+ trans->c, "missing subvolume %u", subvol);
+ return ret;
+}
+
+int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
+ bool inconsistent_if_not_found,
+ int iter_flags,
+ struct bch_subvolume *s)
+{
+ return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
+}
+
+int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
+ struct bch_subvolume *subvol)
+{
+ struct bch_snapshot snap;
+
+ return bch2_snapshot_lookup(trans, snapshot, &snap) ?:
+ bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol);
+}
+
+int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
+ u32 *snapid)
+{
+ struct btree_iter iter;
+ struct bkey_s_c_subvolume subvol;
+ int ret;
+
+ subvol = bch2_bkey_get_iter_typed(trans, &iter,
+ BTREE_ID_subvolumes, POS(0, subvolid),
+ BTREE_ITER_CACHED|BTREE_ITER_WITH_UPDATES,
+ subvolume);
+ ret = bkey_err(subvol);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
+ "missing subvolume %u", subvolid);
+
+ if (likely(!ret))
+ *snapid = le32_to_cpu(subvol.v->snapshot);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int bch2_subvolume_reparent(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c k,
+ u32 old_parent, u32 new_parent)
+{
+ struct bkey_i_subvolume *s;
+ int ret;
+
+ if (k.k->type != KEY_TYPE_subvolume)
+ return 0;
+
+ if (bkey_val_bytes(k.k) > offsetof(struct bch_subvolume, parent) &&
+ le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
+ return 0;
+
+ s = bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
+ ret = PTR_ERR_OR_ZERO(s);
+ if (ret)
+ return ret;
+
+ s->v.parent = cpu_to_le32(new_parent);
+ return 0;
+}
+
+/*
+ * Separate from the snapshot tree in the snapshots btree, we record the tree
+ * structure of how snapshot subvolumes were created - the parent subvolume of
+ * each snapshot subvolume.
+ *
+ * When a subvolume is deleted, we scan for child subvolumes and reparent them,
+ * to avoid dangling references:
+ */
+static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_subvolume s;
+
+ return lockrestart_do(trans,
+ bch2_subvolume_get(trans, subvolid_to_delete, true,
+ BTREE_ITER_CACHED, &s)) ?:
+ for_each_btree_key_commit(trans, iter,
+ BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL,
+ bch2_subvolume_reparent(trans, &iter, k,
+ subvolid_to_delete, le32_to_cpu(s.parent)));
+}
+
+/*
+ * Delete subvolume, mark snapshot ID as deleted, queue up snapshot
+ * deletion/cleanup:
+ */
+static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
+{
+ struct btree_iter iter;
+ struct bkey_s_c_subvolume subvol;
+ struct btree_trans_commit_hook *h;
+ u32 snapid;
+ int ret = 0;
+
+ subvol = bch2_bkey_get_iter_typed(trans, &iter,
+ BTREE_ID_subvolumes, POS(0, subvolid),
+ BTREE_ITER_CACHED|BTREE_ITER_INTENT,
+ subvolume);
+ ret = bkey_err(subvol);
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
+ "missing subvolume %u", subvolid);
+ if (ret)
+ return ret;
+
+ snapid = le32_to_cpu(subvol.v->snapshot);
+
+ ret = bch2_btree_delete_at(trans, &iter, 0);
+ if (ret)
+ goto err;
+
+ ret = bch2_snapshot_node_set_deleted(trans, snapid);
+ if (ret)
+ goto err;
+
+ h = bch2_trans_kmalloc(trans, sizeof(*h));
+ ret = PTR_ERR_OR_ZERO(h);
+ if (ret)
+ goto err;
+
+ h->fn = bch2_delete_dead_snapshots_hook;
+ bch2_trans_commit_hook(trans, h);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
+{
+ return bch2_subvolumes_reparent(trans, subvolid) ?:
+ commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+ __bch2_subvolume_delete(trans, subvolid));
+}
+
+static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
+{
+ struct bch_fs *c = container_of(work, struct bch_fs,
+ snapshot_wait_for_pagecache_and_delete_work);
+ snapshot_id_list s;
+ u32 *id;
+ int ret = 0;
+
+ while (!ret) {
+ mutex_lock(&c->snapshots_unlinked_lock);
+ s = c->snapshots_unlinked;
+ darray_init(&c->snapshots_unlinked);
+ mutex_unlock(&c->snapshots_unlinked_lock);
+
+ if (!s.nr)
+ break;
+
+ bch2_evict_subvolume_inodes(c, &s);
+
+ for (id = s.data; id < s.data + s.nr; id++) {
+ ret = bch2_trans_run(c, bch2_subvolume_delete(trans, *id));
+ if (ret) {
+ bch_err_msg(c, ret, "deleting subvolume %u", *id);
+ break;
+ }
+ }
+
+ darray_exit(&s);
+ }
+
+ bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
+}
+
+struct subvolume_unlink_hook {
+ struct btree_trans_commit_hook h;
+ u32 subvol;
+};
+
+static int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
+ struct btree_trans_commit_hook *_h)
+{
+ struct subvolume_unlink_hook *h = container_of(_h, struct subvolume_unlink_hook, h);
+ struct bch_fs *c = trans->c;
+ int ret = 0;
+
+ mutex_lock(&c->snapshots_unlinked_lock);
+ if (!snapshot_list_has_id(&c->snapshots_unlinked, h->subvol))
+ ret = snapshot_list_add(c, &c->snapshots_unlinked, h->subvol);
+ mutex_unlock(&c->snapshots_unlinked_lock);
+
+ if (ret)
+ return ret;
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_snapshot_delete_pagecache))
+ return -EROFS;
+
+ if (!queue_work(c->write_ref_wq, &c->snapshot_wait_for_pagecache_and_delete_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_snapshot_delete_pagecache);
+ return 0;
+}
+
+int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
+{
+ struct btree_iter iter;
+ struct bkey_i_subvolume *n;
+ struct subvolume_unlink_hook *h;
+ int ret = 0;
+
+ h = bch2_trans_kmalloc(trans, sizeof(*h));
+ ret = PTR_ERR_OR_ZERO(h);
+ if (ret)
+ return ret;
+
+ h->h.fn = bch2_subvolume_wait_for_pagecache_and_delete_hook;
+ h->subvol = subvolid;
+ bch2_trans_commit_hook(trans, &h->h);
+
+ n = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_subvolumes, POS(0, subvolid),
+ BTREE_ITER_CACHED, subvolume);
+ ret = PTR_ERR_OR_ZERO(n);
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
+ "missing subvolume %u", subvolid);
+ return ret;
+ }
+
+ SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
+ u32 src_subvolid,
+ u32 *new_subvolid,
+ u32 *new_snapshotid,
+ bool ro)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
+ struct bkey_i_subvolume *new_subvol = NULL;
+ struct bkey_i_subvolume *src_subvol = NULL;
+ u32 parent = 0, new_nodes[2], snapshot_subvols[2];
+ int ret = 0;
+
+ ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
+ BTREE_ID_subvolumes, POS(0, U32_MAX));
+ if (ret == -BCH_ERR_ENOSPC_btree_slot)
+ ret = -BCH_ERR_ENOSPC_subvolume_create;
+ if (ret)
+ return ret;
+
+ snapshot_subvols[0] = dst_iter.pos.offset;
+ snapshot_subvols[1] = src_subvolid;
+
+ if (src_subvolid) {
+ /* Creating a snapshot: */
+
+ src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
+ BTREE_ID_subvolumes, POS(0, src_subvolid),
+ BTREE_ITER_CACHED, subvolume);
+ ret = PTR_ERR_OR_ZERO(src_subvol);
+ if (unlikely(ret)) {
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
+ "subvolume %u not found", src_subvolid);
+ goto err;
+ }
+
+ parent = le32_to_cpu(src_subvol->v.snapshot);
+ }
+
+ ret = bch2_snapshot_node_create(trans, parent, new_nodes,
+ snapshot_subvols,
+ src_subvolid ? 2 : 1);
+ if (ret)
+ goto err;
+
+ if (src_subvolid) {
+ src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
+ ret = bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
+ if (ret)
+ goto err;
+ }
+
+ new_subvol = bch2_bkey_alloc(trans, &dst_iter, 0, subvolume);
+ ret = PTR_ERR_OR_ZERO(new_subvol);
+ if (ret)
+ goto err;
+
+ new_subvol->v.flags = 0;
+ new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
+ new_subvol->v.inode = cpu_to_le64(inode);
+ new_subvol->v.parent = cpu_to_le32(src_subvolid);
+ new_subvol->v.otime.lo = cpu_to_le64(bch2_current_time(c));
+ new_subvol->v.otime.hi = 0;
+
+ SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
+ SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
+
+ *new_subvolid = new_subvol->k.p.offset;
+ *new_snapshotid = new_nodes[0];
+err:
+ bch2_trans_iter_exit(trans, &src_iter);
+ bch2_trans_iter_exit(trans, &dst_iter);
+ return ret;
+}
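+
+/*
+ * Illustrative call (ids hypothetical): snapshotting an existing subvolume
+ * inside a transaction commit loop:
+ *
+ *	u32 new_subvolid, new_snapshotid;
+ *
+ *	ret = commit_do(trans, NULL, NULL, 0,
+ *			bch2_subvolume_create(trans, root_inum, src_subvolid,
+ *					      &new_subvolid, &new_snapshotid,
+ *					      false));
+ */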
+
+int bch2_fs_subvolumes_init(struct bch_fs *c)
+{
+ INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
+ INIT_WORK(&c->snapshot_wait_for_pagecache_and_delete_work,
+ bch2_subvolume_wait_for_pagecache_and_delete);
+ mutex_init(&c->snapshots_unlinked_lock);
+ return 0;
+}
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
new file mode 100644
index 000000000000..bb14f92e8687
--- /dev/null
+++ b/fs/bcachefs/subvolume.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUBVOLUME_H
+#define _BCACHEFS_SUBVOLUME_H
+
+#include "darray.h"
+#include "subvolume_types.h"
+
+enum bkey_invalid_flags;
+
+int bch2_check_subvols(struct bch_fs *);
+
+int bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_subvolume ((struct bkey_ops) { \
+ .key_invalid = bch2_subvolume_invalid, \
+ .val_to_text = bch2_subvolume_to_text, \
+ .min_val_size = 16, \
+})
+
+int bch2_subvolume_get(struct btree_trans *, unsigned,
+ bool, int, struct bch_subvolume *);
+int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);
+
+int bch2_delete_dead_snapshots(struct bch_fs *);
+void bch2_delete_dead_snapshots_async(struct bch_fs *);
+
+int bch2_subvolume_unlink(struct btree_trans *, u32);
+int bch2_subvolume_create(struct btree_trans *, u64, u32,
+ u32 *, u32 *, bool);
+
+int bch2_fs_subvolumes_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_SUBVOLUME_H */
diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h
new file mode 100644
index 000000000000..86833445af20
--- /dev/null
+++ b/fs/bcachefs/subvolume_types.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUBVOLUME_TYPES_H
+#define _BCACHEFS_SUBVOLUME_TYPES_H
+
+#include "darray.h"
+
+typedef DARRAY(u32) snapshot_id_list;
+
+#define IS_ANCESTOR_BITMAP 128
+
+struct snapshot_t {
+ u32 parent;
+ u32 skip[3];
+ u32 depth;
+ u32 children[2];
+ u32 subvol; /* Nonzero only if a subvolume points to this node: */
+ u32 tree;
+ u32 equiv;
+ unsigned long is_ancestor[BITS_TO_LONGS(IS_ANCESTOR_BITMAP)];
+};
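+
+/*
+ * skip[] is a skiplist: each entry points at a randomly chosen ancestor
+ * further up the tree, so that walks towards the root (e.g. for ancestor
+ * queries) don't have to visit every intermediate node.
+ */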
+
+struct snapshot_table {
+ struct snapshot_t s[0];
+};
+
+typedef struct {
+ u32 subvol;
+ u64 inum;
+} subvol_inum;
+
+#endif /* _BCACHEFS_SUBVOLUME_TYPES_H */
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
new file mode 100644
index 000000000000..332d41e1c0a3
--- /dev/null
+++ b/fs/bcachefs/super-io.c
@@ -0,0 +1,1258 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "checksum.h"
+#include "counters.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_sb.h"
+#include "journal_seq_blacklist.h"
+#include "recovery.h"
+#include "replicas.h"
+#include "quota.h"
+#include "sb-clean.h"
+#include "sb-members.h"
+#include "super-io.h"
+#include "super.h"
+#include "trace.h"
+#include "vstructs.h"
+
+#include <linux/backing-dev.h>
+#include <linux/sort.h>
+
+static const struct blk_holder_ops bch2_sb_handle_bdev_ops = {
+};
+
+struct bch2_metadata_version {
+ u16 version;
+ const char *name;
+ u64 recovery_passes;
+};
+
+static const struct bch2_metadata_version bch2_metadata_versions[] = {
+#define x(n, v, _recovery_passes) { \
+ .version = v, \
+ .name = #n, \
+ .recovery_passes = _recovery_passes, \
+},
+ BCH_METADATA_VERSIONS()
+#undef x
+};
+
+void bch2_version_to_text(struct printbuf *out, unsigned v)
+{
+ const char *str = "(unknown version)";
+
+ for (unsigned i = 0; i < ARRAY_SIZE(bch2_metadata_versions); i++)
+ if (bch2_metadata_versions[i].version == v) {
+ str = bch2_metadata_versions[i].name;
+ break;
+ }
+
+ prt_printf(out, "%u.%u: %s", BCH_VERSION_MAJOR(v), BCH_VERSION_MINOR(v), str);
+}
+
+unsigned bch2_latest_compatible_version(unsigned v)
+{
+ if (!BCH_VERSION_MAJOR(v))
+ return v;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(bch2_metadata_versions); i++)
+ if (bch2_metadata_versions[i].version > v &&
+ BCH_VERSION_MAJOR(bch2_metadata_versions[i].version) ==
+ BCH_VERSION_MAJOR(v))
+ v = bch2_metadata_versions[i].version;
+
+ return v;
+}
+
+u64 bch2_upgrade_recovery_passes(struct bch_fs *c,
+ unsigned old_version,
+ unsigned new_version)
+{
+ u64 ret = 0;
+
+ for (const struct bch2_metadata_version *i = bch2_metadata_versions;
+ i < bch2_metadata_versions + ARRAY_SIZE(bch2_metadata_versions);
+ i++)
+ if (i->version > old_version && i->version <= new_version) {
+ if (i->recovery_passes & RECOVERY_PASS_ALL_FSCK)
+ ret |= bch2_fsck_recovery_passes();
+ ret |= i->recovery_passes;
+ }
+
+ return ret &= ~RECOVERY_PASS_ALL_FSCK;
+}
+
+const char * const bch2_sb_fields[] = {
+#define x(name, nr) #name,
+ BCH_SB_FIELDS()
+#undef x
+ NULL
+};
+
+static int bch2_sb_field_validate(struct bch_sb *, struct bch_sb_field *,
+ struct printbuf *);
+
+struct bch_sb_field *bch2_sb_field_get_id(struct bch_sb *sb,
+ enum bch_sb_field_type type)
+{
+ struct bch_sb_field *f;
+
+ /* XXX: need locking around superblock to access optional fields */
+
+ vstruct_for_each(sb, f)
+ if (le32_to_cpu(f->type) == type)
+ return f;
+ return NULL;
+}
+
+static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb,
+ struct bch_sb_field *f,
+ unsigned u64s)
+{
+ unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
+ unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;
+
+ BUG_ON(__vstruct_bytes(struct bch_sb, sb_u64s) > sb->buffer_size);
+
+ if (!f && !u64s) {
+ /* nothing to do: */
+ } else if (!f) {
+ f = vstruct_last(sb->sb);
+ memset(f, 0, sizeof(u64) * u64s);
+ f->u64s = cpu_to_le32(u64s);
+ f->type = 0;
+ } else {
+ void *src, *dst;
+
+ src = vstruct_end(f);
+
+ if (u64s) {
+ f->u64s = cpu_to_le32(u64s);
+ dst = vstruct_end(f);
+ } else {
+ dst = f;
+ }
+
+ memmove(dst, src, vstruct_end(sb->sb) - src);
+
+ if (dst > src)
+ memset(src, 0, dst - src);
+ }
+
+ sb->sb->u64s = cpu_to_le32(sb_u64s);
+
+ return u64s ? f : NULL;
+}
+
+void bch2_sb_field_delete(struct bch_sb_handle *sb,
+ enum bch_sb_field_type type)
+{
+ struct bch_sb_field *f = bch2_sb_field_get_id(sb->sb, type);
+
+ if (f)
+ __bch2_sb_field_resize(sb, f, 0);
+}
+
+/* Superblock realloc/free: */
+
+void bch2_free_super(struct bch_sb_handle *sb)
+{
+ kfree(sb->bio);
+ if (!IS_ERR_OR_NULL(sb->bdev))
+ blkdev_put(sb->bdev, sb->holder);
+ kfree(sb->holder);
+
+ kfree(sb->sb);
+ memset(sb, 0, sizeof(*sb));
+}
+
+int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
+{
+ size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
+ size_t new_buffer_size;
+ struct bch_sb *new_sb;
+ struct bio *bio;
+
+ if (sb->bdev)
+ new_bytes = max_t(size_t, new_bytes, bdev_logical_block_size(sb->bdev));
+
+ new_buffer_size = roundup_pow_of_two(new_bytes);
+
+ if (sb->sb && sb->buffer_size >= new_buffer_size)
+ return 0;
+
+ if (sb->sb && sb->have_layout) {
+ u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;
+
+ if (new_bytes > max_bytes) {
+ pr_err("%pg: superblock too big: want %zu but have %llu",
+ sb->bdev, new_bytes, max_bytes);
+ return -BCH_ERR_ENOSPC_sb;
+ }
+ }
+
+ if (dynamic_fault("bcachefs:add:super_realloc"))
+ return -BCH_ERR_ENOMEM_sb_realloc_injected;
+
+ new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
+ if (!new_sb)
+ return -BCH_ERR_ENOMEM_sb_buf_realloc;
+
+ sb->sb = new_sb;
+
+ if (sb->have_bio) {
+ unsigned nr_bvecs = buf_pages(sb->sb, new_buffer_size);
+
+ bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
+ if (!bio)
+ return -BCH_ERR_ENOMEM_sb_bio_realloc;
+
+ bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);
+
+ kfree(sb->bio);
+ sb->bio = bio;
+ }
+
+ sb->buffer_size = new_buffer_size;
+
+ return 0;
+}
+
+struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
+ enum bch_sb_field_type type,
+ unsigned u64s)
+{
+ struct bch_sb_field *f = bch2_sb_field_get_id(sb->sb, type);
+ ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
+ ssize_t d = -old_u64s + u64s;
+
+ if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
+ return NULL;
+
+ if (sb->fs_sb) {
+ struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
+ struct bch_dev *ca;
+ unsigned i;
+
+ lockdep_assert_held(&c->sb_lock);
+
+ /* XXX: we're not checking that offline devices have enough space */
+
+ for_each_online_member(ca, c, i) {
+ struct bch_sb_handle *dev_sb = &ca->disk_sb;
+
+ if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
+ percpu_ref_put(&ca->ref);
+ return NULL;
+ }
+ }
+ }
+
+ f = bch2_sb_field_get_id(sb->sb, type);
+ f = __bch2_sb_field_resize(sb, f, u64s);
+ if (f)
+ f->type = cpu_to_le32(type);
+ return f;
+}
+
+/* Superblock validate: */
+
+static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out)
+{
+ u64 offset, prev_offset, max_sectors;
+ unsigned i;
+
+ BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
+
+ if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
+ !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
+ prt_printf(out, "Not a bcachefs superblock layout");
+ return -BCH_ERR_invalid_sb_layout;
+ }
+
+ if (layout->layout_type != 0) {
+ prt_printf(out, "Invalid superblock layout type %u",
+ layout->layout_type);
+ return -BCH_ERR_invalid_sb_layout_type;
+ }
+
+ if (!layout->nr_superblocks) {
+ prt_printf(out, "Invalid superblock layout: no superblocks");
+ return -BCH_ERR_invalid_sb_layout_nr_superblocks;
+ }
+
+ if (layout->nr_superblocks > ARRAY_SIZE(layout->sb_offset)) {
+ prt_printf(out, "Invalid superblock layout: too many superblocks");
+ return -BCH_ERR_invalid_sb_layout_nr_superblocks;
+ }
+
+ max_sectors = 1 << layout->sb_max_size_bits;
+
+ prev_offset = le64_to_cpu(layout->sb_offset[0]);
+
+ for (i = 1; i < layout->nr_superblocks; i++) {
+ offset = le64_to_cpu(layout->sb_offset[i]);
+
+ if (offset < prev_offset + max_sectors) {
+ prt_printf(out, "Invalid superblock layout: superblocks overlap\n"
+ " (sb %u ends at %llu next starts at %llu",
+ i - 1, prev_offset + max_sectors, offset);
+ return -BCH_ERR_invalid_sb_layout_superblocks_overlap;
+ }
+ prev_offset = offset;
+ }
+
+ return 0;
+}
+
+static int bch2_sb_compatible(struct bch_sb *sb, struct printbuf *out)
+{
+ u16 version = le16_to_cpu(sb->version);
+ u16 version_min = le16_to_cpu(sb->version_min);
+
+ if (!bch2_version_compatible(version)) {
+ prt_str(out, "Unsupported superblock version ");
+ bch2_version_to_text(out, version);
+ prt_str(out, " (min ");
+ bch2_version_to_text(out, bcachefs_metadata_version_min);
+ prt_str(out, ", max ");
+ bch2_version_to_text(out, bcachefs_metadata_version_current);
+ prt_str(out, ")");
+ return -BCH_ERR_invalid_sb_version;
+ }
+
+ if (!bch2_version_compatible(version_min)) {
+ prt_str(out, "Unsupported superblock version_min ");
+ bch2_version_to_text(out, version_min);
+ prt_str(out, " (min ");
+ bch2_version_to_text(out, bcachefs_metadata_version_min);
+ prt_str(out, ", max ");
+ bch2_version_to_text(out, bcachefs_metadata_version_current);
+ prt_str(out, ")");
+ return -BCH_ERR_invalid_sb_version;
+ }
+
+ if (version_min > version) {
+ prt_str(out, "Bad minimum version ");
+ bch2_version_to_text(out, version_min);
+ prt_str(out, ", greater than version field ");
+ bch2_version_to_text(out, version);
+ return -BCH_ERR_invalid_sb_version;
+ }
+
+ return 0;
+}
+
+static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
+ int rw)
+{
+ struct bch_sb *sb = disk_sb->sb;
+ struct bch_sb_field *f;
+ struct bch_sb_field_members_v1 *mi;
+ enum bch_opt_id opt_id;
+ u16 block_size;
+ int ret;
+
+ ret = bch2_sb_compatible(sb, out);
+ if (ret)
+ return ret;
+
+ if (sb->features[1] ||
+ (le64_to_cpu(sb->features[0]) & (~0ULL << BCH_FEATURE_NR))) {
+ prt_printf(out, "Filesystem has incompatible features");
+ return -BCH_ERR_invalid_sb_features;
+ }
+
+ block_size = le16_to_cpu(sb->block_size);
+
+ if (block_size > PAGE_SECTORS) {
+ prt_printf(out, "Block size too big (got %u, max %u)",
+ block_size, PAGE_SECTORS);
+ return -BCH_ERR_invalid_sb_block_size;
+ }
+
+ if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid))) {
+ prt_printf(out, "Bad user UUID (got zeroes)");
+ return -BCH_ERR_invalid_sb_uuid;
+ }
+
+ if (bch2_is_zero(sb->uuid.b, sizeof(sb->uuid))) {
+ prt_printf(out, "Bad internal UUID (got zeroes)");
+ return -BCH_ERR_invalid_sb_uuid;
+ }
+
+ if (!sb->nr_devices ||
+ sb->nr_devices > BCH_SB_MEMBERS_MAX) {
+ prt_printf(out, "Bad number of member devices %u (max %u)",
+ sb->nr_devices, BCH_SB_MEMBERS_MAX);
+ return -BCH_ERR_invalid_sb_too_many_members;
+ }
+
+ if (sb->dev_idx >= sb->nr_devices) {
+ prt_printf(out, "Bad dev_idx (got %u, nr_devices %u)",
+ sb->dev_idx, sb->nr_devices);
+ return -BCH_ERR_invalid_sb_dev_idx;
+ }
+
+ if (!sb->time_precision ||
+ le32_to_cpu(sb->time_precision) > NSEC_PER_SEC) {
+ prt_printf(out, "Invalid time precision: %u (min 1, max %lu)",
+ le32_to_cpu(sb->time_precision), NSEC_PER_SEC);
+ return -BCH_ERR_invalid_sb_time_precision;
+ }
+
+ if (rw == READ) {
+ /*
+ * Been seeing a bug where these are getting inexplicably
+ * zeroed, so we're now validating them, but we have to be
+		 * careful not to prevent people's filesystems from mounting:
+ */
+ if (!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
+ SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
+ if (!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
+ SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 1000);
+
+ if (!BCH_SB_VERSION_UPGRADE_COMPLETE(sb))
+ SET_BCH_SB_VERSION_UPGRADE_COMPLETE(sb, le16_to_cpu(sb->version));
+ }
+
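+	/*
+	 * Validate all options stored in the superblock. Note that we print
+	 * the "Invalid option " prefix into the error buffer before
+	 * validating, and reset the buffer again if validation succeeds:
+	 */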
+ for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
+ const struct bch_option *opt = bch2_opt_table + opt_id;
+
+ if (opt->get_sb != BCH2_NO_SB_OPT) {
+ u64 v = bch2_opt_from_sb(sb, opt_id);
+
+ prt_printf(out, "Invalid option ");
+ ret = bch2_opt_validate(opt, v, out);
+ if (ret)
+ return ret;
+
+ printbuf_reset(out);
+ }
+ }
+
+ /* validate layout */
+ ret = validate_sb_layout(&sb->layout, out);
+ if (ret)
+ return ret;
+
+ vstruct_for_each(sb, f) {
+ if (!f->u64s) {
+ prt_printf(out, "Invalid superblock: optional field with size 0 (type %u)",
+ le32_to_cpu(f->type));
+ return -BCH_ERR_invalid_sb_field_size;
+ }
+
+ if (vstruct_next(f) > vstruct_last(sb)) {
+ prt_printf(out, "Invalid superblock: optional field extends past end of superblock (type %u)",
+ le32_to_cpu(f->type));
+ return -BCH_ERR_invalid_sb_field_size;
+ }
+ }
+
+ /* members must be validated first: */
+ mi = bch2_sb_field_get(sb, members_v1);
+ if (!mi) {
+ prt_printf(out, "Invalid superblock: member info area missing");
+ return -BCH_ERR_invalid_sb_members_missing;
+ }
+
+ ret = bch2_sb_field_validate(sb, &mi->field, out);
+ if (ret)
+ return ret;
+
+ vstruct_for_each(sb, f) {
+ if (le32_to_cpu(f->type) == BCH_SB_FIELD_members_v1)
+ continue;
+
+ ret = bch2_sb_field_validate(sb, f, out);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* device open: */
+
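+/*
+ * Refresh the filesystem's cached, CPU-native copy of superblock fields from
+ * the on-disk superblock:
+ */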
+static void bch2_sb_update(struct bch_fs *c)
+{
+ struct bch_sb *src = c->disk_sb.sb;
+ struct bch_dev *ca;
+ unsigned i;
+
+ lockdep_assert_held(&c->sb_lock);
+
+ c->sb.uuid = src->uuid;
+ c->sb.user_uuid = src->user_uuid;
+ c->sb.version = le16_to_cpu(src->version);
+ c->sb.version_min = le16_to_cpu(src->version_min);
+ c->sb.version_upgrade_complete = BCH_SB_VERSION_UPGRADE_COMPLETE(src);
+ c->sb.nr_devices = src->nr_devices;
+ c->sb.clean = BCH_SB_CLEAN(src);
+ c->sb.encryption_type = BCH_SB_ENCRYPTION_TYPE(src);
+
+ c->sb.nsec_per_time_unit = le32_to_cpu(src->time_precision);
+ c->sb.time_units_per_sec = NSEC_PER_SEC / c->sb.nsec_per_time_unit;
+
+ /* XXX this is wrong, we need a 96 or 128 bit integer type */
+ c->sb.time_base_lo = div_u64(le64_to_cpu(src->time_base_lo),
+ c->sb.nsec_per_time_unit);
+ c->sb.time_base_hi = le32_to_cpu(src->time_base_hi);
+
+ c->sb.features = le64_to_cpu(src->features[0]);
+ c->sb.compat = le64_to_cpu(src->compat[0]);
+
+ for_each_member_device(ca, c, i) {
+ struct bch_member m = bch2_sb_member_get(src, i);
+ ca->mi = bch2_mi_to_cpu(&m);
+ }
+}
+
+static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
+{
+ struct bch_sb_field *src_f, *dst_f;
+ struct bch_sb *dst = dst_handle->sb;
+ unsigned i;
+
+ dst->version = src->version;
+ dst->version_min = src->version_min;
+ dst->seq = src->seq;
+ dst->uuid = src->uuid;
+ dst->user_uuid = src->user_uuid;
+ memcpy(dst->label, src->label, sizeof(dst->label));
+
+ dst->block_size = src->block_size;
+ dst->nr_devices = src->nr_devices;
+
+ dst->time_base_lo = src->time_base_lo;
+ dst->time_base_hi = src->time_base_hi;
+ dst->time_precision = src->time_precision;
+
+ memcpy(dst->flags, src->flags, sizeof(dst->flags));
+ memcpy(dst->features, src->features, sizeof(dst->features));
+ memcpy(dst->compat, src->compat, sizeof(dst->compat));
+
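+	/*
+	 * Copy all superblock fields except the per-device ones, growing the
+	 * destination superblock as needed to fit each source field:
+	 */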
+ for (i = 0; i < BCH_SB_FIELD_NR; i++) {
+ int d;
+
+ if ((1U << i) & BCH_SINGLE_DEVICE_SB_FIELDS)
+ continue;
+
+ src_f = bch2_sb_field_get_id(src, i);
+ dst_f = bch2_sb_field_get_id(dst, i);
+
+ d = (src_f ? le32_to_cpu(src_f->u64s) : 0) -
+ (dst_f ? le32_to_cpu(dst_f->u64s) : 0);
+ if (d > 0) {
+ int ret = bch2_sb_realloc(dst_handle,
+ le32_to_cpu(dst_handle->sb->u64s) + d);
+
+ if (ret)
+ return ret;
+
+ dst = dst_handle->sb;
+ dst_f = bch2_sb_field_get_id(dst, i);
+ }
+
+ dst_f = __bch2_sb_field_resize(dst_handle, dst_f,
+ src_f ? le32_to_cpu(src_f->u64s) : 0);
+
+ if (src_f)
+ memcpy(dst_f, src_f, vstruct_bytes(src_f));
+ }
+
+ return 0;
+}
+
+int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
+{
+ int ret;
+
+ lockdep_assert_held(&c->sb_lock);
+
+ ret = bch2_sb_realloc(&c->disk_sb, 0) ?:
+ __copy_super(&c->disk_sb, src) ?:
+ bch2_sb_replicas_to_cpu_replicas(c) ?:
+ bch2_sb_disk_groups_to_cpu(c);
+ if (ret)
+ return ret;
+
+ bch2_sb_update(c);
+ return 0;
+}
+
+int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
+{
+ return __copy_super(&ca->disk_sb, c->disk_sb.sb);
+}
+
+/* read superblock: */
+
+static int read_one_super(struct bch_sb_handle *sb, u64 offset, struct printbuf *err)
+{
+ struct bch_csum csum;
+ size_t bytes;
+ int ret;
+reread:
+ bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
+ sb->bio->bi_iter.bi_sector = offset;
+ bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);
+
+ ret = submit_bio_wait(sb->bio);
+ if (ret) {
+ prt_printf(err, "IO error: %i", ret);
+ return ret;
+ }
+
+ if (!uuid_equal(&sb->sb->magic, &BCACHE_MAGIC) &&
+ !uuid_equal(&sb->sb->magic, &BCHFS_MAGIC)) {
+ prt_printf(err, "Not a bcachefs superblock");
+ return -BCH_ERR_invalid_sb_magic;
+ }
+
+ ret = bch2_sb_compatible(sb->sb, err);
+ if (ret)
+ return ret;
+
+ bytes = vstruct_bytes(sb->sb);
+
+ if (bytes > 512 << sb->sb->layout.sb_max_size_bits) {
+ prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %lu)",
+ bytes, 512UL << sb->sb->layout.sb_max_size_bits);
+ return -BCH_ERR_invalid_sb_too_big;
+ }
+
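+	/* Superblock is bigger than our buffer - realloc and reread: */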
+ if (bytes > sb->buffer_size) {
+ ret = bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s));
+ if (ret)
+ return ret;
+ goto reread;
+ }
+
+ if (BCH_SB_CSUM_TYPE(sb->sb) >= BCH_CSUM_NR) {
+ prt_printf(err, "unknown checksum type %llu", BCH_SB_CSUM_TYPE(sb->sb));
+ return -BCH_ERR_invalid_sb_csum_type;
+ }
+
+ /* XXX: verify MACs */
+ csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
+ null_nonce(), sb->sb);
+
+ if (bch2_crc_cmp(csum, sb->sb->csum)) {
+ prt_printf(err, "bad checksum");
+ return -BCH_ERR_invalid_sb_csum;
+ }
+
+ sb->seq = le64_to_cpu(sb->sb->seq);
+
+ return 0;
+}
+
+int bch2_read_super(const char *path, struct bch_opts *opts,
+ struct bch_sb_handle *sb)
+{
+ u64 offset = opt_get(*opts, sb);
+ struct bch_sb_layout layout;
+ struct printbuf err = PRINTBUF;
+ __le64 *i;
+ int ret;
+#ifndef __KERNEL__
+retry:
+#endif
+ memset(sb, 0, sizeof(*sb));
+ sb->mode = BLK_OPEN_READ;
+ sb->have_bio = true;
+ sb->holder = kmalloc(1, GFP_KERNEL);
+ if (!sb->holder)
+ return -ENOMEM;
+
+#ifndef __KERNEL__
+ if (opt_get(*opts, direct_io) == false)
+ sb->mode |= BLK_OPEN_BUFFERED;
+#endif
+
+ if (!opt_get(*opts, noexcl))
+ sb->mode |= BLK_OPEN_EXCL;
+
+ if (!opt_get(*opts, nochanges))
+ sb->mode |= BLK_OPEN_WRITE;
+
+ sb->bdev = blkdev_get_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
+ if (IS_ERR(sb->bdev) &&
+ PTR_ERR(sb->bdev) == -EACCES &&
+ opt_get(*opts, read_only)) {
+ sb->mode &= ~BLK_OPEN_WRITE;
+
+ sb->bdev = blkdev_get_by_path(path, sb->mode, sb->holder, &bch2_sb_handle_bdev_ops);
+ if (!IS_ERR(sb->bdev))
+ opt_set(*opts, nochanges, true);
+ }
+
+ if (IS_ERR(sb->bdev)) {
+ ret = PTR_ERR(sb->bdev);
+ goto out;
+ }
+
+ ret = bch2_sb_realloc(sb, 0);
+ if (ret) {
+ prt_printf(&err, "error allocating memory for superblock");
+ goto err;
+ }
+
+ if (bch2_fs_init_fault("read_super")) {
+ prt_printf(&err, "dynamic fault");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = read_one_super(sb, offset, &err);
+ if (!ret)
+ goto got_super;
+
+ if (opt_defined(*opts, sb))
+ goto err;
+
+ printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s",
+ path, err.buf);
+ printbuf_reset(&err);
+
+ /*
+ * Error reading primary superblock - read location of backup
+ * superblocks:
+ */
+ bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
+ sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
+	/*
+	 * Use the sb buffer to read the layout: the sb buffer is page
+	 * aligned, while a buffer for just the layout wouldn't be:
+	 */
+ bch2_bio_map(sb->bio, sb->sb, sizeof(struct bch_sb_layout));
+
+ ret = submit_bio_wait(sb->bio);
+ if (ret) {
+ prt_printf(&err, "IO error: %i", ret);
+ goto err;
+ }
+
+ memcpy(&layout, sb->sb, sizeof(layout));
+ ret = validate_sb_layout(&layout, &err);
+ if (ret)
+ goto err;
+
+ for (i = layout.sb_offset;
+ i < layout.sb_offset + layout.nr_superblocks; i++) {
+ offset = le64_to_cpu(*i);
+
+ if (offset == opt_get(*opts, sb))
+ continue;
+
+ ret = read_one_super(sb, offset, &err);
+ if (!ret)
+ goto got_super;
+ }
+
+ goto err;
+
+got_super:
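+	/*
+	 * Direct IO requires the filesystem block size to be at least the
+	 * device's logical block size; in userspace we can fall back to
+	 * buffered IO and retry:
+	 */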
+ if (le16_to_cpu(sb->sb->block_size) << 9 <
+ bdev_logical_block_size(sb->bdev) &&
+ opt_get(*opts, direct_io)) {
+#ifndef __KERNEL__
+ opt_set(*opts, direct_io, false);
+ bch2_free_super(sb);
+ goto retry;
+#endif
+ prt_printf(&err, "block size (%u) smaller than device block size (%u)",
+ le16_to_cpu(sb->sb->block_size) << 9,
+ bdev_logical_block_size(sb->bdev));
+ ret = -BCH_ERR_block_size_too_small;
+ goto err;
+ }
+
+ ret = 0;
+ sb->have_layout = true;
+
+ ret = bch2_sb_validate(sb, &err, READ);
+ if (ret) {
+ printk(KERN_ERR "bcachefs (%s): error validating superblock: %s",
+ path, err.buf);
+ goto err_no_print;
+ }
+out:
+ printbuf_exit(&err);
+ return ret;
+err:
+ printk(KERN_ERR "bcachefs (%s): error reading superblock: %s",
+ path, err.buf);
+err_no_print:
+ bch2_free_super(sb);
+ goto out;
+}
+
+/* write superblock: */
+
+static void write_super_endio(struct bio *bio)
+{
+ struct bch_dev *ca = bio->bi_private;
+
+ /* XXX: return errors directly */
+
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write error: %s",
+ bch2_blk_status_to_str(bio->bi_status)))
+ ca->sb_write_error = 1;
+
+ closure_put(&ca->fs->sb_write);
+ percpu_ref_put(&ca->io_ref);
+}
+
+static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct bch_sb *sb = ca->disk_sb.sb;
+ struct bio *bio = ca->disk_sb.bio;
+
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
+ bio->bi_iter.bi_sector = le64_to_cpu(sb->layout.sb_offset[0]);
+ bio->bi_end_io = write_super_endio;
+ bio->bi_private = ca;
+ bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
+
+ this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
+ bio_sectors(bio));
+
+ percpu_ref_get(&ca->io_ref);
+ closure_bio_submit(bio, &c->sb_write);
+}
+
+static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
+{
+ struct bch_sb *sb = ca->disk_sb.sb;
+ struct bio *bio = ca->disk_sb.bio;
+
+ sb->offset = sb->layout.sb_offset[idx];
+
+ SET_BCH_SB_CSUM_TYPE(sb, bch2_csum_opt_to_type(c->opts.metadata_checksum, false));
+ sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
+ null_nonce(), sb);
+
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
+ bio->bi_iter.bi_sector = le64_to_cpu(sb->offset);
+ bio->bi_end_io = write_super_endio;
+ bio->bi_private = ca;
+ bch2_bio_map(bio, sb,
+ roundup((size_t) vstruct_bytes(sb),
+ bdev_logical_block_size(ca->disk_sb.bdev)));
+
+ this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
+ bio_sectors(bio));
+
+ percpu_ref_get(&ca->io_ref);
+ closure_bio_submit(bio, &c->sb_write);
+}
+
+int bch2_write_super(struct bch_fs *c)
+{
+ struct closure *cl = &c->sb_write;
+ struct bch_dev *ca;
+ struct printbuf err = PRINTBUF;
+ unsigned i, sb = 0, nr_wrote;
+ struct bch_devs_mask sb_written;
+ bool wrote, can_mount_without_written, can_mount_with_written;
+ unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
+ int ret = 0;
+
+ trace_and_count(c, write_super, c, _RET_IP_);
+
+ if (c->opts.very_degraded)
+ degraded_flags |= BCH_FORCE_IF_LOST;
+
+ lockdep_assert_held(&c->sb_lock);
+
+ closure_init_stack(cl);
+ memset(&sb_written, 0, sizeof(sb_written));
+
+ /* Make sure we're using the new magic numbers: */
+ c->disk_sb.sb->magic = BCHFS_MAGIC;
+ c->disk_sb.sb->layout.magic = BCHFS_MAGIC;
+
+ le64_add_cpu(&c->disk_sb.sb->seq, 1);
+
+ if (test_bit(BCH_FS_ERROR, &c->flags))
+ SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);
+ if (test_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags))
+ SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 1);
+
+ SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
+
+ bch2_sb_counters_from_cpu(c);
+ bch_members_cpy_v2_v1(&c->disk_sb);
+
+ for_each_online_member(ca, c, i)
+ bch2_sb_from_fs(c, ca);
+
+ for_each_online_member(ca, c, i) {
+ printbuf_reset(&err);
+
+ ret = bch2_sb_validate(&ca->disk_sb, &err, WRITE);
+ if (ret) {
+ bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
+ percpu_ref_put(&ca->io_ref);
+ goto out;
+ }
+ }
+
+ if (c->opts.nochanges)
+ goto out;
+
+ /*
+ * Defer writing the superblock until filesystem initialization is
+ * complete - don't write out a partly initialized superblock:
+ */
+ if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
+ goto out;
+
+ for_each_online_member(ca, c, i) {
+ __set_bit(ca->dev_idx, sb_written.d);
+ ca->sb_write_error = 0;
+ }
+
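+	/*
+	 * Read back the current superblock from every device and compare
+	 * sequence numbers, to detect writes that were silently dropped or a
+	 * superblock modified by another process:
+	 */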
+ for_each_online_member(ca, c, i)
+ read_back_super(c, ca);
+ closure_sync(cl);
+
+ for_each_online_member(ca, c, i) {
+ if (ca->sb_write_error)
+ continue;
+
+ if (le64_to_cpu(ca->sb_read_scratch->seq) < ca->disk_sb.seq) {
+ bch2_fs_fatal_error(c,
+ "Superblock write was silently dropped! (seq %llu expected %llu)",
+ le64_to_cpu(ca->sb_read_scratch->seq),
+ ca->disk_sb.seq);
+ percpu_ref_put(&ca->io_ref);
+ ret = -BCH_ERR_erofs_sb_err;
+ goto out;
+ }
+
+ if (le64_to_cpu(ca->sb_read_scratch->seq) > ca->disk_sb.seq) {
+ bch2_fs_fatal_error(c,
+ "Superblock modified by another process (seq %llu expected %llu)",
+ le64_to_cpu(ca->sb_read_scratch->seq),
+ ca->disk_sb.seq);
+ percpu_ref_put(&ca->io_ref);
+ ret = -BCH_ERR_erofs_sb_err;
+ goto out;
+ }
+ }
+
+ do {
+ wrote = false;
+ for_each_online_member(ca, c, i)
+ if (!ca->sb_write_error &&
+ sb < ca->disk_sb.sb->layout.nr_superblocks) {
+ write_one_super(c, ca, sb);
+ wrote = true;
+ }
+ closure_sync(cl);
+ sb++;
+ } while (wrote);
+
+ for_each_online_member(ca, c, i) {
+ if (ca->sb_write_error)
+ __clear_bit(ca->dev_idx, sb_written.d);
+ else
+ ca->disk_sb.seq = le64_to_cpu(ca->disk_sb.sb->seq);
+ }
+
+ nr_wrote = dev_mask_nr(&sb_written);
+
+ can_mount_with_written =
+ bch2_have_enough_devs(c, sb_written, degraded_flags, false);
+
+ for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
+ sb_written.d[i] = ~sb_written.d[i];
+
+ can_mount_without_written =
+ bch2_have_enough_devs(c, sb_written, degraded_flags, false);
+
+ /*
+ * If we would be able to mount _without_ the devices we successfully
+ * wrote superblocks to, we weren't able to write to enough devices:
+ *
+ * Exception: if we can mount without the successes because we haven't
+ * written anything (new filesystem), we continue if we'd be able to
+ * mount with the devices we did successfully write to:
+ */
+ if (bch2_fs_fatal_err_on(!nr_wrote ||
+ !can_mount_with_written ||
+ (can_mount_without_written &&
+ !can_mount_with_written), c,
+ "Unable to write superblock to sufficient devices (from %ps)",
+ (void *) _RET_IP_))
+ ret = -1;
+out:
+ /* Make new options visible after they're persistent: */
+ bch2_sb_update(c);
+ printbuf_exit(&err);
+ return ret;
+}
+
+void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
+{
+ mutex_lock(&c->sb_lock);
+ if (!(c->sb.features & (1ULL << feat))) {
+ c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << feat);
+
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+}
+
+/* Downgrade if superblock is at a higher version than currently supported: */
+void bch2_sb_maybe_downgrade(struct bch_fs *c)
+{
+ lockdep_assert_held(&c->sb_lock);
+
+ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
+ SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
+ if (c->sb.version > bcachefs_metadata_version_current)
+ c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
+ if (c->sb.version_min > bcachefs_metadata_version_current)
+ c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
+ c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
+}
+
+void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version)
+{
+ lockdep_assert_held(&c->sb_lock);
+
+ c->disk_sb.sb->version = cpu_to_le16(new_version);
+ c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
+}
+
+static const struct bch_sb_field_ops *bch2_sb_field_ops[] = {
+#define x(f, nr) \
+ [BCH_SB_FIELD_##f] = &bch_sb_field_ops_##f,
+ BCH_SB_FIELDS()
+#undef x
+};
+
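+/*
+ * Fields with types we don't recognize get no-op ops, so they're ignored
+ * rather than rejected and superblocks from newer versions stay readable:
+ */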
+static const struct bch_sb_field_ops bch2_sb_field_null_ops;
+
+static const struct bch_sb_field_ops *bch2_sb_field_type_ops(unsigned type)
+{
+ return likely(type < ARRAY_SIZE(bch2_sb_field_ops))
+ ? bch2_sb_field_ops[type]
+ : &bch2_sb_field_null_ops;
+}
+
+static int bch2_sb_field_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ struct printbuf *err)
+{
+ unsigned type = le32_to_cpu(f->type);
+ struct printbuf field_err = PRINTBUF;
+ const struct bch_sb_field_ops *ops = bch2_sb_field_type_ops(type);
+ int ret;
+
+ ret = ops->validate ? ops->validate(sb, f, &field_err) : 0;
+ if (ret) {
+ prt_printf(err, "Invalid superblock section %s: %s",
+ bch2_sb_fields[type], field_err.buf);
+ prt_newline(err);
+ bch2_sb_field_to_text(err, sb, f);
+ }
+
+ printbuf_exit(&field_err);
+ return ret;
+}
+
+void bch2_sb_field_to_text(struct printbuf *out, struct bch_sb *sb,
+ struct bch_sb_field *f)
+{
+ unsigned type = le32_to_cpu(f->type);
+ const struct bch_sb_field_ops *ops = bch2_sb_field_type_ops(type);
+
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 32);
+
+ if (type < BCH_SB_FIELD_NR)
+ prt_printf(out, "%s", bch2_sb_fields[type]);
+ else
+ prt_printf(out, "(unknown field %u)", type);
+
+ prt_printf(out, " (size %zu):", vstruct_bytes(f));
+ prt_newline(out);
+
+ if (ops->to_text) {
+ printbuf_indent_add(out, 2);
+ ops->to_text(out, sb, f);
+ printbuf_indent_sub(out, 2);
+ }
+}
+
+void bch2_sb_layout_to_text(struct printbuf *out, struct bch_sb_layout *l)
+{
+ unsigned i;
+
+ prt_printf(out, "Type: %u", l->layout_type);
+ prt_newline(out);
+
+ prt_str(out, "Superblock max size: ");
+ prt_units_u64(out, 512 << l->sb_max_size_bits);
+ prt_newline(out);
+
+ prt_printf(out, "Nr superblocks: %u", l->nr_superblocks);
+ prt_newline(out);
+
+ prt_str(out, "Offsets: ");
+ for (i = 0; i < l->nr_superblocks; i++) {
+ if (i)
+ prt_str(out, ", ");
+ prt_printf(out, "%llu", le64_to_cpu(l->sb_offset[i]));
+ }
+ prt_newline(out);
+}
+
+void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
+ bool print_layout, unsigned fields)
+{
+ struct bch_sb_field *f;
+ u64 fields_have = 0;
+ unsigned nr_devices = 0;
+
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 44);
+
+ for (int i = 0; i < sb->nr_devices; i++)
+ nr_devices += bch2_dev_exists(sb, i);
+
+ prt_printf(out, "External UUID:");
+ prt_tab(out);
+ pr_uuid(out, sb->user_uuid.b);
+ prt_newline(out);
+
+ prt_printf(out, "Internal UUID:");
+ prt_tab(out);
+ pr_uuid(out, sb->uuid.b);
+ prt_newline(out);
+
+ prt_str(out, "Device index:");
+ prt_tab(out);
+ prt_printf(out, "%u", sb->dev_idx);
+ prt_newline(out);
+
+ prt_str(out, "Label:");
+ prt_tab(out);
+ prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
+ prt_newline(out);
+
+ prt_str(out, "Version:");
+ prt_tab(out);
+ bch2_version_to_text(out, le16_to_cpu(sb->version));
+ prt_newline(out);
+
+ prt_str(out, "Version upgrade complete:");
+ prt_tab(out);
+ bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb));
+ prt_newline(out);
+
+ prt_printf(out, "Oldest version on disk:");
+ prt_tab(out);
+ bch2_version_to_text(out, le16_to_cpu(sb->version_min));
+ prt_newline(out);
+
+ prt_printf(out, "Created:");
+ prt_tab(out);
+ if (sb->time_base_lo)
+ pr_time(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
+ else
+ prt_printf(out, "(not set)");
+ prt_newline(out);
+
+ prt_printf(out, "Sequence number:");
+ prt_tab(out);
+ prt_printf(out, "%llu", le64_to_cpu(sb->seq));
+ prt_newline(out);
+
+ prt_printf(out, "Superblock size:");
+ prt_tab(out);
+ prt_printf(out, "%zu", vstruct_bytes(sb));
+ prt_newline(out);
+
+ prt_printf(out, "Clean:");
+ prt_tab(out);
+ prt_printf(out, "%llu", BCH_SB_CLEAN(sb));
+ prt_newline(out);
+
+ prt_printf(out, "Devices:");
+ prt_tab(out);
+ prt_printf(out, "%u", nr_devices);
+ prt_newline(out);
+
+ prt_printf(out, "Sections:");
+ vstruct_for_each(sb, f)
+ fields_have |= 1 << le32_to_cpu(f->type);
+ prt_tab(out);
+ prt_bitflags(out, bch2_sb_fields, fields_have);
+ prt_newline(out);
+
+ prt_printf(out, "Features:");
+ prt_tab(out);
+ prt_bitflags(out, bch2_sb_features, le64_to_cpu(sb->features[0]));
+ prt_newline(out);
+
+ prt_printf(out, "Compat features:");
+ prt_tab(out);
+ prt_bitflags(out, bch2_sb_compat, le64_to_cpu(sb->compat[0]));
+ prt_newline(out);
+
+ prt_newline(out);
+ prt_printf(out, "Options:");
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+ {
+ enum bch_opt_id id;
+
+ for (id = 0; id < bch2_opts_nr; id++) {
+ const struct bch_option *opt = bch2_opt_table + id;
+
+ if (opt->get_sb != BCH2_NO_SB_OPT) {
+ u64 v = bch2_opt_from_sb(sb, id);
+
+ prt_printf(out, "%s:", opt->attr.name);
+ prt_tab(out);
+ bch2_opt_to_text(out, NULL, sb, opt, v,
+ OPT_HUMAN_READABLE|OPT_SHOW_FULL_LIST);
+ prt_newline(out);
+ }
+ }
+ }
+
+ printbuf_indent_sub(out, 2);
+
+ if (print_layout) {
+ prt_newline(out);
+ prt_printf(out, "layout:");
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+ bch2_sb_layout_to_text(out, &sb->layout);
+ printbuf_indent_sub(out, 2);
+ }
+
+ vstruct_for_each(sb, f)
+ if (fields & (1 << le32_to_cpu(f->type))) {
+ prt_newline(out);
+ bch2_sb_field_to_text(out, sb, f);
+ }
+}
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
new file mode 100644
index 000000000000..b0d8584f475f
--- /dev/null
+++ b/fs/bcachefs/super-io.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUPER_IO_H
+#define _BCACHEFS_SUPER_IO_H
+
+#include "extents.h"
+#include "eytzinger.h"
+#include "super_types.h"
+#include "super.h"
+#include "sb-members.h"
+
+#include <asm/byteorder.h>
+
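+/*
+ * A metadata version is compatible if its major version doesn't exceed ours
+ * and it's no older than the oldest supported version:
+ */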
+static inline bool bch2_version_compatible(u16 version)
+{
+ return BCH_VERSION_MAJOR(version) <= BCH_VERSION_MAJOR(bcachefs_metadata_version_current) &&
+ version >= bcachefs_metadata_version_min;
+}
+
+void bch2_version_to_text(struct printbuf *, unsigned);
+unsigned bch2_latest_compatible_version(unsigned);
+
+u64 bch2_upgrade_recovery_passes(struct bch_fs *c,
+ unsigned,
+ unsigned);
+
+#define field_to_type(_f, _name) \
+ container_of_or_null(_f, struct bch_sb_field_##_name, field)
+
+struct bch_sb_field *bch2_sb_field_get_id(struct bch_sb *, enum bch_sb_field_type);
+#define bch2_sb_field_get(_sb, _name) \
+ field_to_type(bch2_sb_field_get_id(_sb, BCH_SB_FIELD_##_name), _name)
+
+struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *,
+ enum bch_sb_field_type, unsigned);
+#define bch2_sb_field_resize(_sb, _name, _u64s) \
+ field_to_type(bch2_sb_field_resize_id(_sb, BCH_SB_FIELD_##_name, _u64s), _name)
+
+void bch2_sb_field_delete(struct bch_sb_handle *, enum bch_sb_field_type);
+
+extern const char * const bch2_sb_fields[];
+
+struct bch_sb_field_ops {
+ int (*validate)(struct bch_sb *, struct bch_sb_field *, struct printbuf *);
+ void (*to_text)(struct printbuf *, struct bch_sb *, struct bch_sb_field *);
+};
+
+static inline __le64 bch2_sb_magic(struct bch_fs *c)
+{
+ __le64 ret;
+
+ memcpy(&ret, &c->sb.uuid, sizeof(ret));
+ return ret;
+}
+
+static inline __u64 jset_magic(struct bch_fs *c)
+{
+ return __le64_to_cpu(bch2_sb_magic(c) ^ JSET_MAGIC);
+}
+
+static inline __u64 bset_magic(struct bch_fs *c)
+{
+ return __le64_to_cpu(bch2_sb_magic(c) ^ BSET_MAGIC);
+}
+
+int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *);
+int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);
+
+void bch2_free_super(struct bch_sb_handle *);
+int bch2_sb_realloc(struct bch_sb_handle *, unsigned);
+
+int bch2_read_super(const char *, struct bch_opts *, struct bch_sb_handle *);
+int bch2_write_super(struct bch_fs *);
+void __bch2_check_set_feature(struct bch_fs *, unsigned);
+
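+/* Set a feature bit, only taking sb_lock if the bit isn't already set: */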
+static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)
+{
+ if (!(c->sb.features & (1ULL << feat)))
+ __bch2_check_set_feature(c, feat);
+}
+
+/* BCH_SB_FIELD_members_v1: */
+
+static inline bool bch2_member_exists(struct bch_member *m)
+{
+ return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
+}
+
+static inline bool bch2_dev_exists(struct bch_sb *sb,
+ unsigned dev)
+{
+ if (dev < sb->nr_devices) {
+ struct bch_member m = bch2_sb_member_get(sb, dev);
+ return bch2_member_exists(&m);
+ }
+ return false;
+}
+
+static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
+{
+ return (struct bch_member_cpu) {
+ .nbuckets = le64_to_cpu(mi->nbuckets),
+ .first_bucket = le16_to_cpu(mi->first_bucket),
+ .bucket_size = le16_to_cpu(mi->bucket_size),
+ .group = BCH_MEMBER_GROUP(mi),
+ .state = BCH_MEMBER_STATE(mi),
+ .discard = BCH_MEMBER_DISCARD(mi),
+ .data_allowed = BCH_MEMBER_DATA_ALLOWED(mi),
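+		/* durability is stored biased by one, with 0 meaning unset (default 1): */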
+ .durability = BCH_MEMBER_DURABILITY(mi)
+ ? BCH_MEMBER_DURABILITY(mi) - 1
+ : 1,
+ .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
+ .valid = bch2_member_exists(mi),
+ };
+}
+
+void bch2_sb_maybe_downgrade(struct bch_fs *);
+void bch2_sb_upgrade(struct bch_fs *, unsigned);
+
+void bch2_sb_field_to_text(struct printbuf *, struct bch_sb *,
+ struct bch_sb_field *);
+void bch2_sb_layout_to_text(struct printbuf *, struct bch_sb_layout *);
+void bch2_sb_to_text(struct printbuf *, struct bch_sb *, bool, unsigned);
+
+#endif /* _BCACHEFS_SUPER_IO_H */
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
new file mode 100644
index 000000000000..0e85c22672be
--- /dev/null
+++ b/fs/bcachefs/super.c
@@ -0,0 +1,2022 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcachefs setup/teardown code, and some metadata io - read a superblock and
+ * figure out what to do with it.
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "bkey_sort.h"
+#include "btree_cache.h"
+#include "btree_gc.h"
+#include "btree_journal_iter.h"
+#include "btree_key_cache.h"
+#include "btree_update_interior.h"
+#include "btree_io.h"
+#include "btree_write_buffer.h"
+#include "buckets_waiting_for_journal.h"
+#include "chardev.h"
+#include "checksum.h"
+#include "clock.h"
+#include "compress.h"
+#include "counters.h"
+#include "debug.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "errcode.h"
+#include "error.h"
+#include "fs.h"
+#include "fs-io.h"
+#include "fs-io-buffered.h"
+#include "fs-io-direct.h"
+#include "fsck.h"
+#include "inode.h"
+#include "io_read.h"
+#include "io_write.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+#include "journal_seq_blacklist.h"
+#include "move.h"
+#include "migrate.h"
+#include "movinggc.h"
+#include "nocow_locking.h"
+#include "quota.h"
+#include "rebalance.h"
+#include "recovery.h"
+#include "replicas.h"
+#include "sb-clean.h"
+#include "sb-members.h"
+#include "snapshot.h"
+#include "subvolume.h"
+#include "super.h"
+#include "super-io.h"
+#include "sysfs.h"
+#include "trace.h"
+
+#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/random.h>
+#include <linux/sysfs.h>
+#include <crypto/hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
+MODULE_DESCRIPTION("bcachefs filesystem");
+
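+/*
+ * Generate the sysfs boilerplate - attribute group, group list and kobj_type -
+ * for an object type from its _files, _release and _sysfs_ops definitions:
+ */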
+#define KTYPE(type) \
+static const struct attribute_group type ## _group = { \
+ .attrs = type ## _files \
+}; \
+ \
+static const struct attribute_group *type ## _groups[] = { \
+ &type ## _group, \
+ NULL \
+}; \
+ \
+static const struct kobj_type type ## _ktype = { \
+ .release = type ## _release, \
+ .sysfs_ops = &type ## _sysfs_ops, \
+ .default_groups = type ## _groups \
+}
+
+static void bch2_fs_release(struct kobject *);
+static void bch2_dev_release(struct kobject *);
+static void bch2_fs_counters_release(struct kobject *k)
+{
+}
+
+static void bch2_fs_internal_release(struct kobject *k)
+{
+}
+
+static void bch2_fs_opts_dir_release(struct kobject *k)
+{
+}
+
+static void bch2_fs_time_stats_release(struct kobject *k)
+{
+}
+
+KTYPE(bch2_fs);
+KTYPE(bch2_fs_counters);
+KTYPE(bch2_fs_internal);
+KTYPE(bch2_fs_opts_dir);
+KTYPE(bch2_fs_time_stats);
+KTYPE(bch2_dev);
+
+static struct kset *bcachefs_kset;
+static LIST_HEAD(bch_fs_list);
+static DEFINE_MUTEX(bch_fs_list_lock);
+
+DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);
+
+static void bch2_dev_free(struct bch_dev *);
+static int bch2_dev_alloc(struct bch_fs *, unsigned);
+static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
+static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
+
+struct bch_fs *bch2_dev_to_fs(dev_t dev)
+{
+ struct bch_fs *c;
+ struct bch_dev *ca;
+ unsigned i;
+
+ mutex_lock(&bch_fs_list_lock);
+ rcu_read_lock();
+
+ list_for_each_entry(c, &bch_fs_list, list)
+ for_each_member_device_rcu(ca, c, i, NULL)
+ if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
+ closure_get(&c->cl);
+ goto found;
+ }
+ c = NULL;
+found:
+ rcu_read_unlock();
+ mutex_unlock(&bch_fs_list_lock);
+
+ return c;
+}
+
+static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
+{
+ struct bch_fs *c;
+
+ lockdep_assert_held(&bch_fs_list_lock);
+
+ list_for_each_entry(c, &bch_fs_list, list)
+ if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
+ return c;
+
+ return NULL;
+}
+
+struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
+{
+ struct bch_fs *c;
+
+ mutex_lock(&bch_fs_list_lock);
+ c = __bch2_uuid_to_fs(uuid);
+ if (c)
+ closure_get(&c->cl);
+ mutex_unlock(&bch_fs_list_lock);
+
+ return c;
+}
+
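+/*
+ * Resize the journal reservation for device usage entries to cover one entry
+ * per member device:
+ */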
+static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i, nr = 0, u64s =
+ ((sizeof(struct jset_entry_dev_usage) +
+ sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
+ sizeof(u64);
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL)
+ nr++;
+ rcu_read_unlock();
+
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->dev_usage_journal_res, u64s * nr);
+}
+
+/* Filesystem RO/RW: */
+
+/*
+ * For startup/shutdown of RW stuff, the dependencies are:
+ *
+ * - foreground writes depend on copygc and rebalance (to free up space)
+ *
+ * - copygc and rebalance depend on mark and sweep gc (they actually probably
+ * don't because they either reserve ahead of time or don't block if
+ * allocations fail, but allocations can require mark and sweep gc to run
+ * because of generation number wraparound)
+ *
+ * - all of the above depends on the allocator threads
+ *
+ * - allocator depends on the journal (when it rewrites prios and gens)
+ */
+
+static void __bch2_fs_read_only(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i, clean_passes = 0;
+ u64 seq = 0;
+
+ bch2_fs_ec_stop(c);
+ bch2_open_buckets_stop(c, NULL, true);
+ bch2_rebalance_stop(c);
+ bch2_copygc_stop(c);
+ bch2_gc_thread_stop(c);
+ bch2_fs_ec_flush(c);
+
+ bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
+ journal_cur_seq(&c->journal));
+
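+	/*
+	 * Flushing can generate more work, so loop until we get two
+	 * consecutive passes in which nothing needed flushing and the journal
+	 * sequence number didn't change:
+	 */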
+ do {
+ clean_passes++;
+
+ if (bch2_btree_interior_updates_flush(c) ||
+ bch2_journal_flush_all_pins(&c->journal) ||
+ bch2_btree_flush_all_writes(c) ||
+ seq != atomic64_read(&c->journal.seq)) {
+ seq = atomic64_read(&c->journal.seq);
+ clean_passes = 0;
+ }
+ } while (clean_passes < 2);
+
+ bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
+ journal_cur_seq(&c->journal));
+
+ if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+ set_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+ bch2_fs_journal_stop(&c->journal);
+
+ /*
+	 * After stopping the journal, remove devices from the allocator:
+ */
+ for_each_member_device(ca, c, i)
+ bch2_dev_allocator_remove(c, ca);
+}
+
+#ifndef BCH_WRITE_REF_DEBUG
+static void bch2_writes_disabled(struct percpu_ref *writes)
+{
+ struct bch_fs *c = container_of(writes, struct bch_fs, writes);
+
+ set_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ wake_up(&bch2_read_only_wait);
+}
+#endif
+
+void bch2_fs_read_only(struct bch_fs *c)
+{
+ if (!test_bit(BCH_FS_RW, &c->flags)) {
+ bch2_journal_reclaim_stop(&c->journal);
+ return;
+ }
+
+ BUG_ON(test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+
+ /*
+ * Block new foreground-end write operations from starting - any new
+ * writes will return -EROFS:
+ */
+ set_bit(BCH_FS_GOING_RO, &c->flags);
+#ifndef BCH_WRITE_REF_DEBUG
+ percpu_ref_kill(&c->writes);
+#else
+ for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
+ bch2_write_ref_put(c, i);
+#endif
+
+ /*
+ * If we're not doing an emergency shutdown, we want to wait on
+ * outstanding writes to complete so they don't see spurious errors due
+ * to shutting down the allocator:
+ *
+	 * If we are doing an emergency shutdown, outstanding writes may hang
+	 * until we shut down the allocator, so we don't want to wait on them
+	 * before shutting everything down - but we do need to wait on them
+	 * before returning and signalling that going RO is complete:
+ */
+ wait_event(bch2_read_only_wait,
+ test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
+ test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
+
+ __bch2_fs_read_only(c);
+
+ wait_event(bch2_read_only_wait,
+ test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
+
+ clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
+ clear_bit(BCH_FS_GOING_RO, &c->flags);
+
+ if (!bch2_journal_error(&c->journal) &&
+ !test_bit(BCH_FS_ERROR, &c->flags) &&
+ !test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
+ test_bit(BCH_FS_STARTED, &c->flags) &&
+ test_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags) &&
+ !c->opts.norecovery) {
+ BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
+ BUG_ON(atomic_read(&c->btree_cache.dirty));
+ BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
+ BUG_ON(c->btree_write_buffer.state.nr);
+
+ bch_verbose(c, "marking filesystem clean");
+ bch2_fs_mark_clean(c);
+ }
+
+ clear_bit(BCH_FS_RW, &c->flags);
+}
+
+static void bch2_fs_read_only_work(struct work_struct *work)
+{
+ struct bch_fs *c =
+ container_of(work, struct bch_fs, read_only_work);
+
+ down_write(&c->state_lock);
+ bch2_fs_read_only(c);
+ up_write(&c->state_lock);
+}
+
+static void bch2_fs_read_only_async(struct bch_fs *c)
+{
+ queue_work(system_long_wq, &c->read_only_work);
+}
+
+bool bch2_fs_emergency_read_only(struct bch_fs *c)
+{
+ bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
+
+ bch2_journal_halt(&c->journal);
+ bch2_fs_read_only_async(c);
+
+ wake_up(&bch2_read_only_wait);
+ return ret;
+}
+
+static int bch2_fs_read_write_late(struct bch_fs *c)
+{
+ int ret;
+
+ /*
+ * Data move operations can't run until after check_snapshots has
+ * completed, and bch2_snapshot_is_ancestor() is available.
+ *
+ * Ideally we'd start copygc/rebalance earlier instead of waiting for
+ * all of recovery/fsck to complete:
+ */
+ ret = bch2_copygc_start(c);
+ if (ret) {
+ bch_err(c, "error starting copygc thread");
+ return ret;
+ }
+
+ ret = bch2_rebalance_start(c);
+ if (ret) {
+ bch_err(c, "error starting rebalance thread");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __bch2_fs_read_write(struct bch_fs *c, bool early)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret;
+
+ if (test_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags)) {
+ bch_err(c, "cannot go rw, unfixed btree errors");
+ return -BCH_ERR_erofs_unfixed_errors;
+ }
+
+ if (test_bit(BCH_FS_RW, &c->flags))
+ return 0;
+
+ if (c->opts.norecovery)
+ return -BCH_ERR_erofs_norecovery;
+
+ /*
+ * nochanges is used for fsck -n mode - we have to allow going rw
+ * during recovery for that to work:
+ */
+ if (c->opts.nochanges && (!early || c->opts.read_only))
+ return -BCH_ERR_erofs_nochanges;
+
+ bch_info(c, "going read-write");
+
+ ret = bch2_members_v2_init(c);
+ if (ret)
+ goto err;
+
+ ret = bch2_fs_mark_dirty(c);
+ if (ret)
+ goto err;
+
+ clear_bit(BCH_FS_CLEAN_SHUTDOWN, &c->flags);
+
+ /*
+ * First journal write must be a flush write: after a clean shutdown we
+ * don't read the journal, so the first journal write may end up
+ * overwriting whatever was there previously, and there must always be
+ * at least one non-flush write in the journal or recovery will fail:
+ */
+ set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
+
+ for_each_rw_member(ca, c, i)
+ bch2_dev_allocator_add(c, ca);
+ bch2_recalc_capacity(c);
+
+ ret = bch2_gc_thread_start(c);
+ if (ret) {
+ bch_err(c, "error starting gc thread");
+ return ret;
+ }
+
+ ret = bch2_journal_reclaim_start(&c->journal);
+ if (ret)
+ goto err;
+
+ if (!early) {
+ ret = bch2_fs_read_write_late(c);
+ if (ret)
+ goto err;
+ }
+
+#ifndef BCH_WRITE_REF_DEBUG
+ percpu_ref_reinit(&c->writes);
+#else
+ for (i = 0; i < BCH_WRITE_REF_NR; i++) {
+ BUG_ON(atomic_long_read(&c->writes[i]));
+ atomic_long_inc(&c->writes[i]);
+ }
+#endif
+ set_bit(BCH_FS_RW, &c->flags);
+ set_bit(BCH_FS_WAS_RW, &c->flags);
+
+ bch2_do_discards(c);
+ bch2_do_invalidates(c);
+ bch2_do_stripe_deletes(c);
+ bch2_do_pending_node_rewrites(c);
+ return 0;
+err:
+ __bch2_fs_read_only(c);
+ return ret;
+}
+
+int bch2_fs_read_write(struct bch_fs *c)
+{
+ return __bch2_fs_read_write(c, false);
+}
+
+int bch2_fs_read_write_early(struct bch_fs *c)
+{
+ lockdep_assert_held(&c->state_lock);
+
+ return __bch2_fs_read_write(c, true);
+}
+
+/* Filesystem startup/shutdown: */
+
+static void __bch2_fs_free(struct bch_fs *c)
+{
+ unsigned i;
+
+ for (i = 0; i < BCH_TIME_STAT_NR; i++)
+ bch2_time_stats_exit(&c->times[i]);
+
+ bch2_free_pending_node_rewrites(c);
+ bch2_fs_counters_exit(c);
+ bch2_fs_snapshots_exit(c);
+ bch2_fs_quota_exit(c);
+ bch2_fs_fs_io_direct_exit(c);
+ bch2_fs_fs_io_buffered_exit(c);
+ bch2_fs_fsio_exit(c);
+ bch2_fs_ec_exit(c);
+ bch2_fs_encryption_exit(c);
+ bch2_fs_nocow_locking_exit(c);
+ bch2_fs_io_write_exit(c);
+ bch2_fs_io_read_exit(c);
+ bch2_fs_buckets_waiting_for_journal_exit(c);
+ bch2_fs_btree_interior_update_exit(c);
+ bch2_fs_btree_iter_exit(c);
+ bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
+ bch2_fs_btree_cache_exit(c);
+ bch2_fs_replicas_exit(c);
+ bch2_fs_journal_exit(&c->journal);
+ bch2_io_clock_exit(&c->io_clock[WRITE]);
+ bch2_io_clock_exit(&c->io_clock[READ]);
+ bch2_fs_compress_exit(c);
+ bch2_journal_keys_free(&c->journal_keys);
+ bch2_journal_entries_free(c);
+ bch2_fs_btree_write_buffer_exit(c);
+ percpu_free_rwsem(&c->mark_lock);
+ free_percpu(c->online_reserved);
+
+ darray_exit(&c->btree_roots_extra);
+ free_percpu(c->pcpu);
+ mempool_exit(&c->large_bkey_pool);
+ mempool_exit(&c->btree_bounce_pool);
+ bioset_exit(&c->btree_bio);
+ mempool_exit(&c->fill_iter);
+#ifndef BCH_WRITE_REF_DEBUG
+ percpu_ref_exit(&c->writes);
+#endif
+ kfree(rcu_dereference_protected(c->disk_groups, 1));
+ kfree(c->journal_seq_blacklist_table);
+ kfree(c->unused_inode_hints);
+
+ if (c->write_ref_wq)
+ destroy_workqueue(c->write_ref_wq);
+ if (c->io_complete_wq)
+ destroy_workqueue(c->io_complete_wq);
+ if (c->copygc_wq)
+ destroy_workqueue(c->copygc_wq);
+ if (c->btree_io_complete_wq)
+ destroy_workqueue(c->btree_io_complete_wq);
+ if (c->btree_update_wq)
+ destroy_workqueue(c->btree_update_wq);
+
+ bch2_free_super(&c->disk_sb);
+ kvpfree(c, sizeof(*c));
+ module_put(THIS_MODULE);
+}
+
+static void bch2_fs_release(struct kobject *kobj)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
+
+ __bch2_fs_free(c);
+}
+
+void __bch2_fs_stop(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ bch_verbose(c, "shutting down");
+
+ set_bit(BCH_FS_STOPPING, &c->flags);
+
+ cancel_work_sync(&c->journal_seq_blacklist_gc_work);
+
+ down_write(&c->state_lock);
+ bch2_fs_read_only(c);
+ up_write(&c->state_lock);
+
+ for_each_member_device(ca, c, i)
+ if (ca->kobj.state_in_sysfs &&
+ ca->disk_sb.bdev)
+ sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+
+ if (c->kobj.state_in_sysfs)
+ kobject_del(&c->kobj);
+
+ bch2_fs_debug_exit(c);
+ bch2_fs_chardev_exit(c);
+
+ kobject_put(&c->counters_kobj);
+ kobject_put(&c->time_stats);
+ kobject_put(&c->opts_dir);
+ kobject_put(&c->internal);
+
+ /* btree prefetch might have kicked off reads in the background: */
+ bch2_btree_flush_all_reads(c);
+
+ for_each_member_device(ca, c, i)
+ cancel_work_sync(&ca->io_error_work);
+
+ cancel_work_sync(&c->read_only_work);
+}
+
+void bch2_fs_free(struct bch_fs *c)
+{
+ unsigned i;
+
+ mutex_lock(&bch_fs_list_lock);
+ list_del(&c->list);
+ mutex_unlock(&bch_fs_list_lock);
+
+ closure_sync(&c->cl);
+ closure_debug_destroy(&c->cl);
+
+ for (i = 0; i < c->sb.nr_devices; i++) {
+ struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
+
+ if (ca) {
+ bch2_free_super(&ca->disk_sb);
+ bch2_dev_free(ca);
+ }
+ }
+
+ bch_verbose(c, "shutdown complete");
+
+ kobject_put(&c->kobj);
+}
+
+void bch2_fs_stop(struct bch_fs *c)
+{
+ __bch2_fs_stop(c);
+ bch2_fs_free(c);
+}
+
+static int bch2_fs_online(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret = 0;
+
+ lockdep_assert_held(&bch_fs_list_lock);
+
+ if (__bch2_uuid_to_fs(c->sb.uuid)) {
+ bch_err(c, "filesystem UUID already open");
+ return -EINVAL;
+ }
+
+ ret = bch2_fs_chardev_init(c);
+ if (ret) {
+ bch_err(c, "error creating character device");
+ return ret;
+ }
+
+ bch2_fs_debug_init(c);
+
+ ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
+ kobject_add(&c->internal, &c->kobj, "internal") ?:
+ kobject_add(&c->opts_dir, &c->kobj, "options") ?:
+ kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
+ kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
+ bch2_opts_create_sysfs_files(&c->opts_dir);
+ if (ret) {
+ bch_err(c, "error creating sysfs objects");
+ return ret;
+ }
+
+ down_write(&c->state_lock);
+
+ for_each_member_device(ca, c, i) {
+ ret = bch2_dev_sysfs_online(c, ca);
+ if (ret) {
+ bch_err(c, "error creating sysfs objects");
+ percpu_ref_put(&ca->ref);
+ goto err;
+ }
+ }
+
+ BUG_ON(!list_empty(&c->list));
+ list_add(&c->list, &bch_fs_list);
+err:
+ up_write(&c->state_lock);
+ return ret;
+}
+
+static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
+{
+ struct bch_fs *c;
+ struct printbuf name = PRINTBUF;
+ unsigned i, iter_size;
+ int ret = 0;
+
+ c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
+ if (!c) {
+ c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
+ goto out;
+ }
+
+ __module_get(THIS_MODULE);
+
+ closure_init(&c->cl, NULL);
+
+ c->kobj.kset = bcachefs_kset;
+ kobject_init(&c->kobj, &bch2_fs_ktype);
+ kobject_init(&c->internal, &bch2_fs_internal_ktype);
+ kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
+ kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
+ kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);
+
+ c->minor = -1;
+ c->disk_sb.fs_sb = true;
+
+ init_rwsem(&c->state_lock);
+ mutex_init(&c->sb_lock);
+ mutex_init(&c->replicas_gc_lock);
+ mutex_init(&c->btree_root_lock);
+ INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
+
+ init_rwsem(&c->gc_lock);
+ mutex_init(&c->gc_gens_lock);
+
+ for (i = 0; i < BCH_TIME_STAT_NR; i++)
+ bch2_time_stats_init(&c->times[i]);
+
+ bch2_fs_copygc_init(c);
+ bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
+ bch2_fs_btree_interior_update_init_early(c);
+ bch2_fs_allocator_background_init(c);
+ bch2_fs_allocator_foreground_init(c);
+ bch2_fs_rebalance_init(c);
+ bch2_fs_quota_init(c);
+ bch2_fs_ec_init_early(c);
+ bch2_fs_move_init(c);
+
+ INIT_LIST_HEAD(&c->list);
+
+ mutex_init(&c->usage_scratch_lock);
+
+ mutex_init(&c->bio_bounce_pages_lock);
+ mutex_init(&c->snapshot_table_lock);
+ init_rwsem(&c->snapshot_create_lock);
+
+ spin_lock_init(&c->btree_write_error_lock);
+
+ INIT_WORK(&c->journal_seq_blacklist_gc_work,
+ bch2_blacklist_entries_gc);
+
+ INIT_LIST_HEAD(&c->journal_iters);
+
+ INIT_LIST_HEAD(&c->fsck_errors);
+ mutex_init(&c->fsck_error_lock);
+
+ seqcount_init(&c->gc_pos_lock);
+
+ seqcount_init(&c->usage_lock);
+
+ sema_init(&c->io_in_flight, 128);
+
+ INIT_LIST_HEAD(&c->vfs_inodes_list);
+ mutex_init(&c->vfs_inodes_lock);
+
+ c->copy_gc_enabled = 1;
+ c->rebalance.enabled = 1;
+ c->promote_whole_extents = true;
+
+ c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
+ c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
+ c->journal.blocked_time = &c->times[BCH_TIME_blocked_journal];
+ c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];
+
+ bch2_fs_btree_cache_init_early(&c->btree_cache);
+
+ mutex_init(&c->sectors_available_lock);
+
+ ret = percpu_init_rwsem(&c->mark_lock);
+ if (ret)
+ goto err;
+
+ mutex_lock(&c->sb_lock);
+ ret = bch2_sb_to_fs(c, sb);
+ mutex_unlock(&c->sb_lock);
+
+ if (ret)
+ goto err;
+
+ pr_uuid(&name, c->sb.user_uuid.b);
+ strscpy(c->name, name.buf, sizeof(c->name));
+ printbuf_exit(&name);
+
+ ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
+ if (ret)
+ goto err;
+
+ /* Compat: */
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
+ !BCH_SB_JOURNAL_FLUSH_DELAY(sb))
+ SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
+
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
+ !BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
+ SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
+
+ c->opts = bch2_opts_default;
+ ret = bch2_opts_from_sb(&c->opts, sb);
+ if (ret)
+ goto err;
+
+ bch2_opts_apply(&c->opts, opts);
+
+ c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
+ if (c->opts.inodes_use_key_cache)
+ c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
+ c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;
+
+ c->block_bits = ilog2(block_sectors(c));
+ c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
+
+ if (bch2_fs_init_fault("fs_alloc")) {
+ bch_err(c, "fs_alloc fault injected");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ iter_size = sizeof(struct sort_iter) +
+ (btree_blocks(c) + 1) * 2 *
+ sizeof(struct sort_iter_set);
+
+ c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
+
+ if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
+ WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512)) ||
+ !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
+ !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
+ !(c->io_complete_wq = alloc_workqueue("bcachefs_io",
+ WQ_FREEZABLE|WQ_HIGHPRI|WQ_MEM_RECLAIM, 1)) ||
+ !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
+ WQ_FREEZABLE, 0)) ||
+#ifndef BCH_WRITE_REF_DEBUG
+ percpu_ref_init(&c->writes, bch2_writes_disabled,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+#endif
+ mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
+ bioset_init(&c->btree_bio, 1,
+ max(offsetof(struct btree_read_bio, bio),
+ offsetof(struct btree_write_bio, wbio.bio)),
+ BIOSET_NEED_BVECS) ||
+ !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
+ !(c->online_reserved = alloc_percpu(u64)) ||
+ mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
+ btree_bytes(c)) ||
+ mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
+ !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
+ sizeof(u64), GFP_KERNEL))) {
+ ret = -BCH_ERR_ENOMEM_fs_other_alloc;
+ goto err;
+ }
+
+ ret = bch2_fs_counters_init(c) ?:
+ bch2_io_clock_init(&c->io_clock[READ]) ?:
+ bch2_io_clock_init(&c->io_clock[WRITE]) ?:
+ bch2_fs_journal_init(&c->journal) ?:
+ bch2_fs_replicas_init(c) ?:
+ bch2_fs_btree_cache_init(c) ?:
+ bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
+ bch2_fs_btree_iter_init(c) ?:
+ bch2_fs_btree_interior_update_init(c) ?:
+ bch2_fs_buckets_waiting_for_journal_init(c) ?:
+ bch2_fs_btree_write_buffer_init(c) ?:
+ bch2_fs_subvolumes_init(c) ?:
+ bch2_fs_io_read_init(c) ?:
+ bch2_fs_io_write_init(c) ?:
+ bch2_fs_nocow_locking_init(c) ?:
+ bch2_fs_encryption_init(c) ?:
+ bch2_fs_compress_init(c) ?:
+ bch2_fs_ec_init(c) ?:
+ bch2_fs_fsio_init(c) ?:
+ bch2_fs_fs_io_buffered_init(c) ?:
+ bch2_fs_fs_io_direct_init(c);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < c->sb.nr_devices; i++)
+ if (bch2_dev_exists(c->disk_sb.sb, i) &&
+ bch2_dev_alloc(c, i)) {
+ ret = -EEXIST;
+ goto err;
+ }
+
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->btree_root_journal_res,
+ BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
+ bch2_dev_usage_journal_reserve(c);
+ bch2_journal_entry_res_resize(&c->journal,
+ &c->clock_journal_res,
+ (sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
+
+ mutex_lock(&bch_fs_list_lock);
+ ret = bch2_fs_online(c);
+ mutex_unlock(&bch_fs_list_lock);
+
+ if (ret)
+ goto err;
+out:
+ return c;
+err:
+ bch2_fs_free(c);
+ c = ERR_PTR(ret);
+ goto out;
+}
+
+noinline_for_stack
+static void print_mount_opts(struct bch_fs *c)
+{
+ enum bch_opt_id i;
+ struct printbuf p = PRINTBUF;
+ bool first = true;
+
+ prt_str(&p, "mounting version ");
+ bch2_version_to_text(&p, c->sb.version);
+
+ if (c->opts.read_only) {
+ prt_str(&p, " opts=");
+ first = false;
+ prt_printf(&p, "ro");
+ }
+
+ for (i = 0; i < bch2_opts_nr; i++) {
+ const struct bch_option *opt = &bch2_opt_table[i];
+ u64 v = bch2_opt_get_by_id(&c->opts, i);
+
+ if (!(opt->flags & OPT_MOUNT))
+ continue;
+
+ if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
+ continue;
+
+ prt_str(&p, first ? " opts=" : ",");
+ first = false;
+ bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
+ }
+
+ bch_info(c, "%s", p.buf);
+ printbuf_exit(&p);
+}
+
+int bch2_fs_start(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ time64_t now = ktime_get_real_seconds();
+ unsigned i;
+ int ret;
+
+ print_mount_opts(c);
+
+ down_write(&c->state_lock);
+
+ BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
+
+ mutex_lock(&c->sb_lock);
+
+ ret = bch2_members_v2_init(c);
+ if (ret) {
+ mutex_unlock(&c->sb_lock);
+ goto err;
+ }
+
+ for_each_online_member(ca, c, i)
+ bch2_sb_from_fs(c, ca);
+
+ for_each_online_member(ca, c, i)
+ bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
+
+ mutex_unlock(&c->sb_lock);
+
+ for_each_rw_member(ca, c, i)
+ bch2_dev_allocator_add(c, ca);
+ bch2_recalc_capacity(c);
+
+ for (i = 0; i < BCH_TRANSACTIONS_NR; i++) {
+ mutex_lock(&c->btree_transaction_stats[i].lock);
+ bch2_time_stats_init(&c->btree_transaction_stats[i].lock_hold_times);
+ mutex_unlock(&c->btree_transaction_stats[i].lock);
+ }
+
+ ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
+ ? bch2_fs_recovery(c)
+ : bch2_fs_initialize(c);
+ if (ret)
+ goto err;
+
+ ret = bch2_opts_check_may_set(c);
+ if (ret)
+ goto err;
+
+ if (bch2_fs_init_fault("fs_start")) {
+ bch_err(c, "fs_start fault injected");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ set_bit(BCH_FS_STARTED, &c->flags);
+
+ if (c->opts.read_only || c->opts.nochanges) {
+ bch2_fs_read_only(c);
+ } else {
+ ret = !test_bit(BCH_FS_RW, &c->flags)
+ ? bch2_fs_read_write(c)
+ : bch2_fs_read_write_late(c);
+ if (ret)
+ goto err;
+ }
+
+ ret = 0;
+out:
+ up_write(&c->state_lock);
+ return ret;
+err:
+ bch_err_msg(c, ret, "starting filesystem");
+ goto out;
+}
+
+static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
+{
+ struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
+
+ if (le16_to_cpu(sb->block_size) != block_sectors(c))
+ return -BCH_ERR_mismatched_block_size;
+
+ if (le16_to_cpu(m.bucket_size) <
+ BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
+ return -BCH_ERR_bucket_size_too_small;
+
+ return 0;
+}
+
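+/*
+ * Check whether a device superblock still belongs to this filesystem; the
+ * newer of the two superblocks is authoritative on whether the device has
+ * since been removed:
+ */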
+static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
+{
+ struct bch_sb *newest =
+ le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
+
+ if (!uuid_equal(&fs->uuid, &sb->uuid))
+ return -BCH_ERR_device_not_a_member_of_filesystem;
+
+ if (!bch2_dev_exists(newest, sb->dev_idx))
+ return -BCH_ERR_device_has_been_removed;
+
+ if (fs->block_size != sb->block_size)
+ return -BCH_ERR_mismatched_block_size;
+
+ return 0;
+}
+
+/* Device startup/shutdown: */
+
+static void bch2_dev_release(struct kobject *kobj)
+{
+ struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
+
+ kfree(ca);
+}
+
+static void bch2_dev_free(struct bch_dev *ca)
+{
+ cancel_work_sync(&ca->io_error_work);
+
+ if (ca->kobj.state_in_sysfs &&
+ ca->disk_sb.bdev)
+ sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+
+ if (ca->kobj.state_in_sysfs)
+ kobject_del(&ca->kobj);
+
+ bch2_free_super(&ca->disk_sb);
+ bch2_dev_journal_exit(ca);
+
+ free_percpu(ca->io_done);
+ bioset_exit(&ca->replica_set);
+ bch2_dev_buckets_free(ca);
+ free_page((unsigned long) ca->sb_read_scratch);
+
+ bch2_time_stats_exit(&ca->io_latency[WRITE]);
+ bch2_time_stats_exit(&ca->io_latency[READ]);
+
+ percpu_ref_exit(&ca->io_ref);
+ percpu_ref_exit(&ca->ref);
+ kobject_put(&ca->kobj);
+}
+
+static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
+{
+ lockdep_assert_held(&c->state_lock);
+
+ if (percpu_ref_is_zero(&ca->io_ref))
+ return;
+
+ __bch2_dev_read_only(c, ca);
+
+ reinit_completion(&ca->io_ref_completion);
+ percpu_ref_kill(&ca->io_ref);
+ wait_for_completion(&ca->io_ref_completion);
+
+ if (ca->kobj.state_in_sysfs) {
+ sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+ sysfs_remove_link(&ca->kobj, "block");
+ }
+
+ bch2_free_super(&ca->disk_sb);
+ bch2_dev_journal_exit(ca);
+}
+
+static void bch2_dev_ref_complete(struct percpu_ref *ref)
+{
+ struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
+
+ complete(&ca->ref_completion);
+}
+
+static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
+{
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
+
+ complete(&ca->io_ref_completion);
+}
+
+static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
+{
+ int ret;
+
+ if (!c->kobj.state_in_sysfs)
+ return 0;
+
+ if (!ca->kobj.state_in_sysfs) {
+ ret = kobject_add(&ca->kobj, &c->kobj,
+ "dev-%u", ca->dev_idx);
+ if (ret)
+ return ret;
+ }
+
+ if (ca->disk_sb.bdev) {
+ struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
+
+ ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&ca->kobj, block, "block");
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
+ struct bch_member *member)
+{
+ struct bch_dev *ca;
+
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca)
+ return NULL;
+
+ kobject_init(&ca->kobj, &bch2_dev_ktype);
+ init_completion(&ca->ref_completion);
+ init_completion(&ca->io_ref_completion);
+
+ init_rwsem(&ca->bucket_lock);
+
+ INIT_WORK(&ca->io_error_work, bch2_io_error_work);
+
+ bch2_time_stats_init(&ca->io_latency[READ]);
+ bch2_time_stats_init(&ca->io_latency[WRITE]);
+
+ ca->mi = bch2_mi_to_cpu(member);
+ ca->uuid = member->uuid;
+
+ ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
+ ca->mi.bucket_size / btree_sectors(c));
+
+ if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
+ 0, GFP_KERNEL) ||
+ percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+ !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
+ bch2_dev_buckets_alloc(c, ca) ||
+ bioset_init(&ca->replica_set, 4,
+ offsetof(struct bch_write_bio, bio), 0) ||
+ !(ca->io_done = alloc_percpu(*ca->io_done)))
+ goto err;
+
+ return ca;
+err:
+ bch2_dev_free(ca);
+ return NULL;
+}
+
+static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
+ unsigned dev_idx)
+{
+ ca->dev_idx = dev_idx;
+ __set_bit(ca->dev_idx, ca->self.d);
+ scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);
+
+ ca->fs = c;
+ rcu_assign_pointer(c->devs[ca->dev_idx], ca);
+
+ if (bch2_dev_sysfs_online(c, ca))
+ pr_warn("error creating sysfs objects");
+}
+
+static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
+{
+ struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
+ struct bch_dev *ca = NULL;
+ int ret = 0;
+
+ if (bch2_fs_init_fault("dev_alloc"))
+ goto err;
+
+ ca = __bch2_dev_alloc(c, &member);
+ if (!ca)
+ goto err;
+
+ ca->fs = c;
+
+ bch2_dev_attach(c, ca, dev_idx);
+ return ret;
+err:
+ if (ca)
+ bch2_dev_free(ca);
+ return -BCH_ERR_ENOMEM_dev_alloc;
+}
+
+static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
+{
+	int ret;
+
+ if (bch2_dev_is_online(ca)) {
+ bch_err(ca, "already have device online in slot %u",
+ sb->sb->dev_idx);
+ return -BCH_ERR_device_already_online;
+ }
+
+ if (get_capacity(sb->bdev->bd_disk) <
+ ca->mi.bucket_size * ca->mi.nbuckets) {
+ bch_err(ca, "cannot online: device too small");
+ return -BCH_ERR_device_size_too_small;
+ }
+
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
+
+ ret = bch2_dev_journal_init(ca, sb->sb);
+ if (ret)
+ return ret;
+
+ /*
+ * Commit: from here on this handle owns the superblock; zero the caller's
+ * copy so it isn't freed twice:
+ */
+ ca->disk_sb = *sb;
+ memset(sb, 0, sizeof(*sb));
+
+ ca->dev = ca->disk_sb.bdev->bd_dev;
+
+ percpu_ref_reinit(&ca->io_ref);
+
+ return 0;
+}
+
+static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
+{
+ struct bch_dev *ca;
+ int ret;
+
+ lockdep_assert_held(&c->state_lock);
+
+ if (le64_to_cpu(sb->sb->seq) >
+ le64_to_cpu(c->disk_sb.sb->seq))
+ bch2_sb_to_fs(c, sb->sb);
+
+ BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
+ !c->devs[sb->sb->dev_idx]);
+
+ ca = bch_dev_locked(c, sb->sb->dev_idx);
+
+ ret = __bch2_dev_attach_bdev(ca, sb);
+ if (ret)
+ return ret;
+
+ bch2_dev_sysfs_online(c, ca);
+
+ if (c->sb.nr_devices == 1)
+ snprintf(c->name, sizeof(c->name), "%pg", ca->disk_sb.bdev);
+ snprintf(ca->name, sizeof(ca->name), "%pg", ca->disk_sb.bdev);
+
+ rebalance_wakeup(c);
+ return 0;
+}
+
+/* Device management: */
+
+/*
+ * Note: this function is also used by the error paths - when a particular
+ * device sees an error, we call it to determine whether we can just set the
+ * device RO, or - if this function returns false - whether we need to set
+ * the whole filesystem RO:
+ *
+ * XXX: maybe we should be more explicit about whether we're changing state
+ * because we got an error or what have you?
+ */
+bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_member_state new_state, int flags)
+{
+ struct bch_devs_mask new_online_devs;
+ struct bch_dev *ca2;
+ int i, nr_rw = 0, required;
+
+ lockdep_assert_held(&c->state_lock);
+
+ switch (new_state) {
+ case BCH_MEMBER_STATE_rw:
+ return true;
+ case BCH_MEMBER_STATE_ro:
+ if (ca->mi.state != BCH_MEMBER_STATE_rw)
+ return true;
+
+ /* do we have enough devices to write to? */
+ for_each_member_device(ca2, c, i)
+ if (ca2 != ca)
+ nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
+
+ required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
+ ? c->opts.metadata_replicas
+ : c->opts.metadata_replicas_required,
+ !(flags & BCH_FORCE_IF_DATA_DEGRADED)
+ ? c->opts.data_replicas
+ : c->opts.data_replicas_required);
+
+ return nr_rw >= required;
+ case BCH_MEMBER_STATE_failed:
+ case BCH_MEMBER_STATE_spare:
+ if (ca->mi.state != BCH_MEMBER_STATE_rw &&
+ ca->mi.state != BCH_MEMBER_STATE_ro)
+ return true;
+
+ /* do we have enough devices to read from? */
+ new_online_devs = bch2_online_devs(c);
+ __clear_bit(ca->dev_idx, new_online_devs.d);
+
+ return bch2_have_enough_devs(c, new_online_devs, flags, false);
+ default:
+ BUG();
+ }
+}
+
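+/*
+ * Check whether we have enough devices to start: unless we're mounting
+ * degraded (or very degraded), every rw/ro member must be online, and the
+ * online devices must satisfy our replication requirements:
+ */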
+static bool bch2_fs_may_start(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i, flags = 0;
+
+ if (c->opts.very_degraded)
+ flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;
+
+ if (c->opts.degraded)
+ flags |= BCH_FORCE_IF_DEGRADED;
+
+ if (!c->opts.degraded &&
+ !c->opts.very_degraded) {
+ mutex_lock(&c->sb_lock);
+
+ for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
+ if (!bch2_dev_exists(c->disk_sb.sb, i))
+ continue;
+
+ ca = bch_dev_locked(c, i);
+
+ if (!bch2_dev_is_online(ca) &&
+ (ca->mi.state == BCH_MEMBER_STATE_rw ||
+ ca->mi.state == BCH_MEMBER_STATE_ro)) {
+ mutex_unlock(&c->sb_lock);
+ return false;
+ }
+ }
+ mutex_unlock(&c->sb_lock);
+ }
+
+ return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
+}
+
+static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
+{
+ /*
+ * The allocator thread itself allocates btree nodes, so stop it first:
+ */
+ bch2_dev_allocator_remove(c, ca);
+ bch2_dev_journal_stop(&c->journal, ca);
+}
+
+static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
+{
+ lockdep_assert_held(&c->state_lock);
+
+ BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
+
+ bch2_dev_allocator_add(c, ca);
+ bch2_recalc_capacity(c);
+}
+
+int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_member_state new_state, int flags)
+{
+ struct bch_member *m;
+ int ret = 0;
+
+ if (ca->mi.state == new_state)
+ return 0;
+
+ if (!bch2_dev_state_allowed(c, ca, new_state, flags))
+ return -BCH_ERR_device_state_not_allowed;
+
+ if (new_state != BCH_MEMBER_STATE_rw)
+ __bch2_dev_read_only(c, ca);
+
+ bch_notice(ca, "%s", bch2_member_states[new_state]);
+
+ mutex_lock(&c->sb_lock);
+ m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ SET_BCH_MEMBER_STATE(m, new_state);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ if (new_state == BCH_MEMBER_STATE_rw)
+ __bch2_dev_read_write(c, ca);
+
+ rebalance_wakeup(c);
+
+ return ret;
+}
+
+int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_member_state new_state, int flags)
+{
+ int ret;
+
+ down_write(&c->state_lock);
+ ret = __bch2_dev_set_state(c, ca, new_state, flags);
+ up_write(&c->state_lock);
+
+ return ret;
+}
+
+/* Device add/removal: */
+
+static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct bpos start = POS(ca->dev_idx, 0);
+ struct bpos end = POS(ca->dev_idx, U64_MAX);
+ int ret;
+
+ /*
+ * We clear the LRU and need_discard btrees first so that we don't race
+ * with bch2_do_invalidates() and bch2_do_discards()
+ */
+ ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
+ BTREE_TRIGGER_NORUN, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
+ BTREE_TRIGGER_NORUN, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
+ BTREE_TRIGGER_NORUN, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
+ BTREE_TRIGGER_NORUN, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
+ BTREE_TRIGGER_NORUN, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
+ BTREE_TRIGGER_NORUN, NULL);
+ if (ret)
+ bch_err_msg(c, ret, "removing dev alloc info");
+
+ return ret;
+}
+
+int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
+{
+ struct bch_member *m;
+ unsigned dev_idx = ca->dev_idx, data;
+ int ret;
+
+ down_write(&c->state_lock);
+
+ /*
+ * We consume a reference to ca->ref, regardless of whether we succeed
+ * or fail:
+ */
+ percpu_ref_put(&ca->ref);
+
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
+ bch_err(ca, "Cannot remove without losing data");
+ ret = -BCH_ERR_device_state_not_allowed;
+ goto err;
+ }
+
+ __bch2_dev_read_only(c, ca);
+
+ ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
+ if (ret) {
+ bch_err_msg(ca, ret, "dropping data");
+ goto err;
+ }
+
+ ret = bch2_dev_remove_alloc(c, ca);
+ if (ret) {
+ bch_err_msg(ca, ret, "deleting alloc info");
+ goto err;
+ }
+
+ ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
+ if (ret) {
+ bch_err_msg(ca, ret, "flushing journal");
+ goto err;
+ }
+
+ ret = bch2_journal_flush(&c->journal);
+ if (ret) {
+ bch_err(ca, "journal error");
+ goto err;
+ }
+
+ ret = bch2_replicas_gc2(c);
+ if (ret) {
+ bch_err_msg(ca, ret, "in replicas_gc2()");
+ goto err;
+ }
+
+ data = bch2_dev_has_data(c, ca);
+ if (data) {
+ struct printbuf data_has = PRINTBUF;
+
+ prt_bitflags(&data_has, bch2_data_types, data);
+ bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
+ printbuf_exit(&data_has);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ __bch2_dev_offline(c, ca);
+
+ mutex_lock(&c->sb_lock);
+ rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
+ mutex_unlock(&c->sb_lock);
+
+ percpu_ref_kill(&ca->ref);
+ wait_for_completion(&ca->ref_completion);
+
+ bch2_dev_free(ca);
+
+ /*
+ * At this point the device object has been removed in-core, but the
+ * on-disk journal might still refer to the device index via sb device
+ * usage entries. Recovery fails if it sees usage information for an
+ * invalid device. Flush journal pins to push the back of the journal
+ * past now invalid device index references before we update the
+ * superblock, but after the device object has been removed so any
+ * further journal writes elide usage info for the device.
+ */
+ bch2_journal_flush_all_pins(&c->journal);
+
+ /*
+ * Free this device's slot in the bch_member array - all pointers to
+ * this device must be gone:
+ */
+ mutex_lock(&c->sb_lock);
+ m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
+ memset(&m->uuid, 0, sizeof(m->uuid));
+
+ bch2_write_super(c);
+
+ mutex_unlock(&c->sb_lock);
+ up_write(&c->state_lock);
+
+ bch2_dev_usage_journal_reserve(c);
+ return 0;
+err:
+ if (ca->mi.state == BCH_MEMBER_STATE_rw &&
+ !percpu_ref_is_zero(&ca->io_ref))
+ __bch2_dev_read_write(c, ca);
+ up_write(&c->state_lock);
+ return ret;
+}
+
+/* Add new device to running filesystem: */
+int bch2_dev_add(struct bch_fs *c, const char *path)
+{
+ struct bch_opts opts = bch2_opts_empty();
+ struct bch_sb_handle sb;
+ struct bch_dev *ca = NULL;
+ struct bch_sb_field_members_v2 *mi;
+ struct bch_member dev_mi;
+ unsigned dev_idx, nr_devices, u64s;
+ struct printbuf errbuf = PRINTBUF;
+ struct printbuf label = PRINTBUF;
+ int ret;
+
+ ret = bch2_read_super(path, &opts, &sb);
+ if (ret) {
+ bch_err_msg(c, ret, "reading super");
+ goto err;
+ }
+
+ dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
+
+ if (BCH_MEMBER_GROUP(&dev_mi)) {
+ bch2_disk_path_to_text(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
+ if (label.allocation_failure) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ ret = bch2_dev_may_add(sb.sb, c);
+ if (ret) {
+ bch_err_fn(c, ret);
+ goto err;
+ }
+
+ ca = __bch2_dev_alloc(c, &dev_mi);
+ if (!ca) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ bch2_dev_usage_init(ca);
+
+ ret = __bch2_dev_attach_bdev(ca, &sb);
+ if (ret)
+ goto err;
+
+ ret = bch2_dev_journal_alloc(ca);
+ if (ret) {
+ bch_err_msg(c, ret, "allocating journal");
+ goto err;
+ }
+
+ down_write(&c->state_lock);
+ mutex_lock(&c->sb_lock);
+
+ ret = bch2_sb_from_fs(c, ca);
+ if (ret) {
+ bch_err_msg(c, ret, "setting up new superblock");
+ goto err_unlock;
+ }
+
+ mi = bch2_sb_field_get(ca->disk_sb.sb, members_v2);
+
+ if (!bch2_sb_field_resize(&ca->disk_sb, members_v2,
+ le32_to_cpu(mi->field.u64s) +
+ sizeof(dev_mi) / sizeof(u64))) {
+ ret = -BCH_ERR_ENOSPC_sb_members;
+ bch_err_msg(c, ret, "setting up new superblock");
+ goto err_unlock;
+ }
+
+ if (dynamic_fault("bcachefs:add:no_slot"))
+ goto no_slot;
+
+ for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
+ if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
+ goto have_slot;
+no_slot:
+ ret = -BCH_ERR_ENOSPC_sb_members;
+ bch_err_msg(c, ret, "setting up new superblock");
+ goto err_unlock;
+
+have_slot:
+ nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
+ u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
+ le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
+
+ mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
+ if (!mi) {
+ ret = -BCH_ERR_ENOSPC_sb_members;
+ bch_err_msg(c, ret, "setting up new superblock");
+ goto err_unlock;
+ }
+ struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
+
+ /* success: */
+
+ *m = dev_mi;
+ m->last_mount = cpu_to_le64(ktime_get_real_seconds());
+ c->disk_sb.sb->nr_devices = nr_devices;
+
+ ca->disk_sb.sb->dev_idx = dev_idx;
+ bch2_dev_attach(c, ca, dev_idx);
+
+ if (BCH_MEMBER_GROUP(&dev_mi)) {
+ ret = __bch2_dev_group_set(c, ca, label.buf);
+ if (ret) {
+ bch_err_msg(c, ret, "creating new label");
+ goto err_unlock;
+ }
+ }
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ bch2_dev_usage_journal_reserve(c);
+
+ ret = bch2_trans_mark_dev_sb(c, ca);
+ if (ret) {
+ bch_err_msg(c, ret, "marking new superblock");
+ goto err_late;
+ }
+
+ ret = bch2_fs_freespace_init(c);
+ if (ret) {
+ bch_err_msg(c, ret, "initializing free space");
+ goto err_late;
+ }
+
+ ca->new_fs_bucket_idx = 0;
+
+ if (ca->mi.state == BCH_MEMBER_STATE_rw)
+ __bch2_dev_read_write(c, ca);
+
+ up_write(&c->state_lock);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&c->sb_lock);
+ up_write(&c->state_lock);
+err:
+ if (ca)
+ bch2_dev_free(ca);
+ bch2_free_super(&sb);
+ printbuf_exit(&label);
+ printbuf_exit(&errbuf);
+ return ret;
+err_late:
+ up_write(&c->state_lock);
+ ca = NULL;
+ goto err;
+}
+
+/* Hot add existing device to running filesystem: */
+int bch2_dev_online(struct bch_fs *c, const char *path)
+{
+ struct bch_opts opts = bch2_opts_empty();
+ struct bch_sb_handle sb = { NULL };
+ struct bch_dev *ca;
+ unsigned dev_idx;
+ int ret;
+
+ down_write(&c->state_lock);
+
+ ret = bch2_read_super(path, &opts, &sb);
+ if (ret) {
+ up_write(&c->state_lock);
+ return ret;
+ }
+
+ dev_idx = sb.sb->dev_idx;
+
+ ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
+ if (ret) {
+ bch_err_msg(c, ret, "bringing %s online", path);
+ goto err;
+ }
+
+ ret = bch2_dev_attach_bdev(c, &sb);
+ if (ret)
+ goto err;
+
+ ca = bch_dev_locked(c, dev_idx);
+
+ ret = bch2_trans_mark_dev_sb(c, ca);
+ if (ret) {
+ bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
+ goto err;
+ }
+
+ if (ca->mi.state == BCH_MEMBER_STATE_rw)
+ __bch2_dev_read_write(c, ca);
+
+ mutex_lock(&c->sb_lock);
+ struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+
+ m->last_mount =
+ cpu_to_le64(ktime_get_real_seconds());
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ ret = bch2_fs_freespace_init(c);
+ if (ret)
+ bch_err_msg(c, ret, "initializing free space");
+
+ up_write(&c->state_lock);
+ return 0;
+err:
+ up_write(&c->state_lock);
+ bch2_free_super(&sb);
+ return ret;
+}
+
+int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
+{
+ down_write(&c->state_lock);
+
+ if (!bch2_dev_is_online(ca)) {
+ bch_err(ca, "Already offline");
+ up_write(&c->state_lock);
+ return 0;
+ }
+
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
+ bch_err(ca, "Cannot offline required disk");
+ up_write(&c->state_lock);
+ return -BCH_ERR_device_state_not_allowed;
+ }
+
+ __bch2_dev_offline(c, ca);
+
+ up_write(&c->state_lock);
+ return 0;
+}
+
+int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
+{
+ struct bch_member *m;
+ u64 old_nbuckets;
+ int ret = 0;
+
+ down_write(&c->state_lock);
+ old_nbuckets = ca->mi.nbuckets;
+
+ if (nbuckets < ca->mi.nbuckets) {
+ bch_err(ca, "Cannot shrink yet");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (bch2_dev_is_online(ca) &&
+ get_capacity(ca->disk_sb.bdev->bd_disk) <
+ ca->mi.bucket_size * nbuckets) {
+ bch_err(ca, "New size larger than device");
+ ret = -BCH_ERR_device_size_too_small;
+ goto err;
+ }
+
+ ret = bch2_dev_buckets_resize(c, ca, nbuckets);
+ if (ret) {
+ bch_err_msg(ca, ret, "resizing buckets");
+ goto err;
+ }
+
+ ret = bch2_trans_mark_dev_sb(c, ca);
+ if (ret)
+ goto err;
+
+ mutex_lock(&c->sb_lock);
+ m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+ m->nbuckets = cpu_to_le64(nbuckets);
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
+ if (ca->mi.freespace_initialized) {
+ ret = bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
+ if (ret)
+ goto err;
+
+ /*
+ * XXX: this is all wrong transactionally - we'll be able to do
+ * this correctly after the disk space accounting rewrite
+ */
+ ca->usage_base->d[BCH_DATA_free].buckets += nbuckets - old_nbuckets;
+ }
+
+ bch2_recalc_capacity(c);
+err:
+ up_write(&c->state_lock);
+ return ret;
+}
+
+/* return with ref on ca->ref: */
+struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
+{
+ struct bch_dev *ca;
+ unsigned i;
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i, NULL)
+ if (!strcmp(name, ca->name))
+ goto found;
+ ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
+found:
+ rcu_read_unlock();
+
+ return ca;
+}
+
+/* Filesystem open: */
+
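+/*
+ * Open a filesystem from a set of devices: read every superblock, take the
+ * one with the newest seq as authoritative, skip devices that have since
+ * been removed, then allocate the filesystem, attach each block device, and
+ * start it unless opts.nostart was specified:
+ */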
+struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
+ struct bch_opts opts)
+{
+ struct bch_sb_handle *sb = NULL;
+ struct bch_fs *c = NULL;
+ unsigned i, best_sb = 0;
+ struct printbuf errbuf = PRINTBUF;
+ int ret = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return ERR_PTR(-ENODEV);
+
+ if (!nr_devices) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
+ if (!sb) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < nr_devices; i++) {
+ ret = bch2_read_super(devices[i], &opts, &sb[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 1; i < nr_devices; i++)
+ if (le64_to_cpu(sb[i].sb->seq) >
+ le64_to_cpu(sb[best_sb].sb->seq))
+ best_sb = i;
+
+ i = 0;
+ while (i < nr_devices) {
+ if (i != best_sb &&
+ !bch2_dev_exists(sb[best_sb].sb, sb[i].sb->dev_idx)) {
+ pr_info("%pg has been removed, skipping", sb[i].bdev);
+ bch2_free_super(&sb[i]);
+ array_remove_item(sb, nr_devices, i);
+ continue;
+ }
+
+ ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
+ if (ret)
+ goto err_print;
+ i++;
+ }
+
+ c = bch2_fs_alloc(sb[best_sb].sb, opts);
+ if (IS_ERR(c)) {
+ ret = PTR_ERR(c);
+ goto err;
+ }
+
+ down_write(&c->state_lock);
+ for (i = 0; i < nr_devices; i++) {
+ ret = bch2_dev_attach_bdev(c, &sb[i]);
+ if (ret) {
+ up_write(&c->state_lock);
+ goto err;
+ }
+ }
+ up_write(&c->state_lock);
+
+ if (!bch2_fs_may_start(c)) {
+ ret = -BCH_ERR_insufficient_devices_to_start;
+ goto err_print;
+ }
+
+ if (!c->opts.nostart) {
+ ret = bch2_fs_start(c);
+ if (ret)
+ goto err;
+ }
+out:
+ kfree(sb);
+ printbuf_exit(&errbuf);
+ module_put(THIS_MODULE);
+ return c;
+err_print:
+ pr_err("bch_fs_open err opening %s: %s",
+ devices[0], bch2_err_str(ret));
+err:
+ if (!IS_ERR_OR_NULL(c))
+ bch2_fs_stop(c);
+ if (sb)
+ for (i = 0; i < nr_devices; i++)
+ bch2_free_super(&sb[i]);
+ c = ERR_PTR(ret);
+ goto out;
+}
+
+/* Global interfaces/init */
+
+static void bcachefs_exit(void)
+{
+ bch2_debug_exit();
+ bch2_vfs_exit();
+ bch2_chardev_exit();
+ bch2_btree_key_cache_exit();
+ if (bcachefs_kset)
+ kset_unregister(bcachefs_kset);
+}
+
+static int __init bcachefs_init(void)
+{
+ bch2_bkey_pack_test();
+
+ if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
+ bch2_btree_key_cache_init() ||
+ bch2_chardev_init() ||
+ bch2_vfs_init() ||
+ bch2_debug_init())
+ goto err;
+
+ return 0;
+err:
+ bcachefs_exit();
+ return -ENOMEM;
+}
+
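+/* Define a module parameter for each bcachefs debug option: */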
+#define BCH_DEBUG_PARAM(name, description) \
+ bool bch2_##name; \
+ module_param_named(name, bch2_##name, bool, 0644); \
+ MODULE_PARM_DESC(name, description);
+BCH_DEBUG_PARAMS()
+#undef BCH_DEBUG_PARAM
+
+__maybe_unused
+static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
+module_param_named(version, bch2_metadata_version, uint, 0400);
+
+module_exit(bcachefs_exit);
+module_init(bcachefs_init);
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
new file mode 100644
index 000000000000..bf762df18012
--- /dev/null
+++ b/fs/bcachefs/super.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUPER_H
+#define _BCACHEFS_SUPER_H
+
+#include "extents.h"
+
+#include "bcachefs_ioctl.h"
+
+#include <linux/math64.h>
+
+struct bch_fs *bch2_dev_to_fs(dev_t);
+struct bch_fs *bch2_uuid_to_fs(__uuid_t);
+
+bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
+ enum bch_member_state, int);
+int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
+ enum bch_member_state, int);
+int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
+ enum bch_member_state, int);
+
+int bch2_dev_fail(struct bch_dev *, int);
+int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
+int bch2_dev_add(struct bch_fs *, const char *);
+int bch2_dev_online(struct bch_fs *, const char *);
+int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
+int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64);
+struct bch_dev *bch2_dev_lookup(struct bch_fs *, const char *);
+
+bool bch2_fs_emergency_read_only(struct bch_fs *);
+void bch2_fs_read_only(struct bch_fs *);
+
+int bch2_fs_read_write(struct bch_fs *);
+int bch2_fs_read_write_early(struct bch_fs *);
+
+/*
+ * Only for use in the recovery/fsck path:
+ */
+static inline void bch2_fs_lazy_rw(struct bch_fs *c)
+{
+ if (!test_bit(BCH_FS_RW, &c->flags) &&
+ !test_bit(BCH_FS_WAS_RW, &c->flags))
+ bch2_fs_read_write_early(c);
+}
+
+void __bch2_fs_stop(struct bch_fs *);
+void bch2_fs_free(struct bch_fs *);
+void bch2_fs_stop(struct bch_fs *);
+
+int bch2_fs_start(struct bch_fs *);
+struct bch_fs *bch2_fs_open(char * const *, unsigned, struct bch_opts);
+
+#endif /* _BCACHEFS_SUPER_H */
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
new file mode 100644
index 000000000000..78d6138db62d
--- /dev/null
+++ b/fs/bcachefs/super_types.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SUPER_TYPES_H
+#define _BCACHEFS_SUPER_TYPES_H
+
+struct bch_sb_handle {
+ struct bch_sb *sb;
+ struct block_device *bdev;
+ struct bio *bio;
+ void *holder;
+ size_t buffer_size;
+ blk_mode_t mode;
+ unsigned have_layout:1;
+ unsigned have_bio:1;
+ unsigned fs_sb:1;
+ u64 seq;
+};
+
+struct bch_devs_mask {
+ unsigned long d[BITS_TO_LONGS(BCH_SB_MEMBERS_MAX)];
+};
+
+struct bch_devs_list {
+ u8 nr;
+ u8 devs[BCH_BKEY_PTRS_MAX];
+};
+
+struct bch_member_cpu {
+ u64 nbuckets; /* device size */
+ u16 first_bucket; /* index of first bucket used */
+ u16 bucket_size; /* sectors */
+ u16 group;
+ u8 state;
+ u8 discard;
+ u8 data_allowed;
+ u8 durability;
+ u8 freespace_initialized;
+ u8 valid;
+};
+
+struct bch_disk_group_cpu {
+ bool deleted;
+ u16 parent;
+ struct bch_devs_mask devs;
+};
+
+struct bch_disk_groups_cpu {
+ struct rcu_head rcu;
+ unsigned nr;
+ struct bch_disk_group_cpu entries[] __counted_by(nr);
+};
+
+#endif /* _BCACHEFS_SUPER_TYPES_H */
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
new file mode 100644
index 000000000000..eb764b9a4629
--- /dev/null
+++ b/fs/bcachefs/sysfs.c
@@ -0,0 +1,1031 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bcache sysfs interfaces
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#ifndef NO_BCACHEFS_SYSFS
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
+#include "sysfs.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_iter.h"
+#include "btree_key_cache.h"
+#include "btree_update.h"
+#include "btree_update_interior.h"
+#include "btree_gc.h"
+#include "buckets.h"
+#include "clock.h"
+#include "disk_groups.h"
+#include "ec.h"
+#include "inode.h"
+#include "journal.h"
+#include "keylist.h"
+#include "move.h"
+#include "movinggc.h"
+#include "nocow_locking.h"
+#include "opts.h"
+#include "rebalance.h"
+#include "replicas.h"
+#include "super-io.h"
+#include "tests.h"
+
+#include <linux/blkdev.h>
+#include <linux/sort.h>
+#include <linux/sched/clock.h>
+
+#include "util.h"
+
+#define SYSFS_OPS(type) \
+const struct sysfs_ops type ## _sysfs_ops = { \
+ .show = type ## _show, \
+ .store = type ## _store \
+}
+
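+/*
+ * SHOW()/STORE() define a fn_to_text()/fn_store_inner() helper plus the
+ * sysfs .show/.store wrapper around it: the wrapper handles printbuf setup,
+ * guarantees a trailing newline, copies out at most a page, and converts
+ * private error codes via bch2_err_class():
+ */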
+#define SHOW(fn) \
+static ssize_t fn ## _to_text(struct printbuf *, \
+ struct kobject *, struct attribute *); \
+ \
+static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
+ char *buf) \
+{ \
+ struct printbuf out = PRINTBUF; \
+ ssize_t ret = fn ## _to_text(&out, kobj, attr); \
+ \
+ if (out.pos && out.buf[out.pos - 1] != '\n') \
+ prt_newline(&out); \
+ \
+ if (!ret && out.allocation_failure) \
+ ret = -ENOMEM; \
+ \
+ if (!ret) { \
+ ret = min_t(size_t, out.pos, PAGE_SIZE - 1); \
+ memcpy(buf, out.buf, ret); \
+ } \
+ printbuf_exit(&out); \
+ return bch2_err_class(ret); \
+} \
+ \
+static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
+ struct attribute *attr)
+
+#define STORE(fn) \
+static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
+ const char *, size_t); \
+ \
+static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
+ const char *buf, size_t size) \
+{ \
+ return bch2_err_class(fn##_store_inner(kobj, attr, buf, size)); \
+} \
+ \
+static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
+ const char *buf, size_t size)
+
+#define __sysfs_attribute(_name, _mode) \
+ static struct attribute sysfs_##_name = \
+ { .name = #_name, .mode = _mode }
+
+#define write_attribute(n) __sysfs_attribute(n, 0200)
+#define read_attribute(n) __sysfs_attribute(n, 0444)
+#define rw_attribute(n) __sysfs_attribute(n, 0644)
+
+#define sysfs_printf(file, fmt, ...) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ prt_printf(out, fmt "\n", __VA_ARGS__); \
+} while (0)
+
+#define sysfs_print(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ snprint(out, var); \
+} while (0)
+
+#define sysfs_hprint(file, val) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ prt_human_readable_s64(out, val); \
+} while (0)
+
+#define sysfs_strtoul(file, var) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoul_safe(buf, var) ?: (ssize_t) size; \
+} while (0)
+
+#define sysfs_strtoul_clamp(file, var, min, max) \
+do { \
+ if (attr == &sysfs_ ## file) \
+ return strtoul_safe_clamp(buf, var, min, max) \
+ ?: (ssize_t) size; \
+} while (0)
+
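+/*
+ * Statement expression that parses @cp as a decimal unsigned long,
+ * returning the error from the enclosing store handler if parsing fails:
+ */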
+#define strtoul_or_return(cp) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (_r) \
+ return _r; \
+ _v; \
+})
+
+write_attribute(trigger_gc);
+write_attribute(trigger_discards);
+write_attribute(trigger_invalidates);
+write_attribute(prune_cache);
+write_attribute(btree_wakeup);
+rw_attribute(btree_gc_periodic);
+rw_attribute(gc_gens_pos);
+
+read_attribute(uuid);
+read_attribute(minor);
+read_attribute(bucket_size);
+read_attribute(first_bucket);
+read_attribute(nbuckets);
+rw_attribute(durability);
+read_attribute(iodone);
+
+read_attribute(io_latency_read);
+read_attribute(io_latency_write);
+read_attribute(io_latency_stats_read);
+read_attribute(io_latency_stats_write);
+read_attribute(congested);
+
+read_attribute(btree_write_stats);
+
+read_attribute(btree_cache_size);
+read_attribute(compression_stats);
+read_attribute(journal_debug);
+read_attribute(btree_updates);
+read_attribute(btree_cache);
+read_attribute(btree_key_cache);
+read_attribute(stripes_heap);
+read_attribute(open_buckets);
+read_attribute(open_buckets_partial);
+read_attribute(write_points);
+read_attribute(nocow_lock_table);
+
+#ifdef BCH_WRITE_REF_DEBUG
+read_attribute(write_refs);
+
+static const char * const bch2_write_refs[] = {
+#define x(n) #n,
+ BCH_WRITE_REFS()
+#undef x
+ NULL
+};
+
+static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ bch2_printbuf_tabstop_push(out, 24);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
+ prt_str(out, bch2_write_refs[i]);
+ prt_tab(out);
+ prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
+ prt_newline(out);
+ }
+}
+#endif
+
+read_attribute(internal_uuid);
+read_attribute(disk_groups);
+
+read_attribute(has_data);
+read_attribute(alloc_debug);
+
+#define x(t, n, ...) read_attribute(t);
+BCH_PERSISTENT_COUNTERS()
+#undef x
+
+rw_attribute(discard);
+rw_attribute(label);
+
+rw_attribute(copy_gc_enabled);
+read_attribute(copy_gc_wait);
+
+rw_attribute(rebalance_enabled);
+sysfs_pd_controller_attribute(rebalance);
+read_attribute(rebalance_work);
+rw_attribute(promote_whole_extents);
+
+read_attribute(new_stripes);
+
+read_attribute(io_timers_read);
+read_attribute(io_timers_write);
+
+read_attribute(moving_ctxts);
+
+#ifdef CONFIG_BCACHEFS_TESTS
+write_attribute(perf_test);
+#endif /* CONFIG_BCACHEFS_TESTS */
+
+#define x(_name) \
+ static struct attribute sysfs_time_stat_##_name = \
+ { .name = #_name, .mode = 0444 };
+ BCH_TIME_STATS()
+#undef x
+
+static struct attribute sysfs_state_rw = {
+ .name = "state",
+ .mode = 0444,
+};
+
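+/* Every node on the live list is one full btree_bytes(c) allocation: */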
+static size_t bch2_btree_cache_size(struct bch_fs *c)
+{
+ size_t ret = 0;
+ struct btree *b;
+
+ mutex_lock(&c->btree_cache.lock);
+ list_for_each_entry(b, &c->btree_cache.live, list)
+ ret += btree_bytes(c);
+
+ mutex_unlock(&c->btree_cache.lock);
+ return ret;
+}
+
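+/*
+ * Walk every btree that has extent pointers, tallying extents and sectors
+ * by compression type:
+ */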
+static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ enum btree_id id;
+ u64 nr_uncompressed_extents = 0,
+ nr_compressed_extents = 0,
+ nr_incompressible_extents = 0,
+ uncompressed_sectors = 0,
+ incompressible_sectors = 0,
+ compressed_sectors_compressed = 0,
+ compressed_sectors_uncompressed = 0;
+ int ret = 0;
+
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
+ return -EPERM;
+
+ trans = bch2_trans_get(c);
+
+ for (id = 0; id < BTREE_ID_NR; id++) {
+ if (!btree_type_has_ptrs(id))
+ continue;
+
+ for_each_btree_key(trans, iter, id, POS_MIN,
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ bool compressed = false, uncompressed = false, incompressible = false;
+
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ switch (p.crc.compression_type) {
+ case BCH_COMPRESSION_TYPE_none:
+ uncompressed = true;
+ uncompressed_sectors += k.k->size;
+ break;
+ case BCH_COMPRESSION_TYPE_incompressible:
+ incompressible = true;
+ incompressible_sectors += k.k->size;
+ break;
+ default:
+ compressed_sectors_compressed +=
+ p.crc.compressed_size;
+ compressed_sectors_uncompressed +=
+ p.crc.uncompressed_size;
+ compressed = true;
+ break;
+ }
+ }
+
+ if (incompressible)
+ nr_incompressible_extents++;
+ else if (uncompressed)
+ nr_uncompressed_extents++;
+ else if (compressed)
+ nr_compressed_extents++;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ bch2_trans_put(trans);
+
+ if (ret)
+ return ret;
+
+ prt_printf(out, "uncompressed:\n");
+ prt_printf(out, " nr extents: %llu\n", nr_uncompressed_extents);
+ prt_printf(out, " size: ");
+ prt_human_readable_u64(out, uncompressed_sectors << 9);
+ prt_printf(out, "\n");
+
+ prt_printf(out, "compressed:\n");
+ prt_printf(out, " nr extents: %llu\n", nr_compressed_extents);
+ prt_printf(out, " compressed size: ");
+ prt_human_readable_u64(out, compressed_sectors_compressed << 9);
+ prt_printf(out, "\n");
+ prt_printf(out, " uncompressed size: ");
+ prt_human_readable_u64(out, compressed_sectors_uncompressed << 9);
+ prt_printf(out, "\n");
+
+ prt_printf(out, "incompressible:\n");
+ prt_printf(out, " nr extents: %llu\n", nr_incompressible_extents);
+ prt_printf(out, " size: ");
+ prt_human_readable_u64(out, incompressible_sectors << 9);
+ prt_printf(out, "\n");
+ return 0;
+}
+
+static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
+ bch2_bpos_to_text(out, c->gc_gens_pos);
+ prt_printf(out, "\n");
+}
+
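+/*
+ * For every in-flight btree transaction, wake up all waiters on the lock
+ * it's currently blocked taking:
+ */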
+static void bch2_btree_wakeup_all(struct bch_fs *c)
+{
+ struct btree_trans *trans;
+
+ seqmutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);
+
+ if (b)
+ six_lock_wakeup_all(&b->lock);
+
+ }
+ seqmutex_unlock(&c->btree_trans_lock);
+}
+
+SHOW(bch2_fs)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
+
+ sysfs_print(minor, c->minor);
+ sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
+
+ sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
+
+ if (attr == &sysfs_btree_write_stats)
+ bch2_btree_write_stats_to_text(out, c);
+
+ sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
+
+ if (attr == &sysfs_gc_gens_pos)
+ bch2_gc_gens_pos_to_text(out, c);
+
+ sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+
+ sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
+ sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
+
+ if (attr == &sysfs_copy_gc_wait)
+ bch2_copygc_wait_to_text(out, c);
+
+ if (attr == &sysfs_rebalance_work)
+ bch2_rebalance_work_to_text(out, c);
+
+ sysfs_print(promote_whole_extents, c->promote_whole_extents);
+
+ /* Debugging: */
+
+ if (attr == &sysfs_journal_debug)
+ bch2_journal_debug_to_text(out, &c->journal);
+
+ if (attr == &sysfs_btree_updates)
+ bch2_btree_updates_to_text(out, c);
+
+ if (attr == &sysfs_btree_cache)
+ bch2_btree_cache_to_text(out, c);
+
+ if (attr == &sysfs_btree_key_cache)
+ bch2_btree_key_cache_to_text(out, &c->btree_key_cache);
+
+ if (attr == &sysfs_stripes_heap)
+ bch2_stripes_heap_to_text(out, c);
+
+ if (attr == &sysfs_open_buckets)
+ bch2_open_buckets_to_text(out, c);
+
+ if (attr == &sysfs_open_buckets_partial)
+ bch2_open_buckets_partial_to_text(out, c);
+
+ if (attr == &sysfs_write_points)
+ bch2_write_points_to_text(out, c);
+
+ if (attr == &sysfs_compression_stats)
+ bch2_compression_stats_to_text(out, c);
+
+ if (attr == &sysfs_new_stripes)
+ bch2_new_stripes_to_text(out, c);
+
+ if (attr == &sysfs_io_timers_read)
+ bch2_io_timers_to_text(out, &c->io_clock[READ]);
+
+ if (attr == &sysfs_io_timers_write)
+ bch2_io_timers_to_text(out, &c->io_clock[WRITE]);
+
+ if (attr == &sysfs_moving_ctxts)
+ bch2_fs_moving_ctxts_to_text(out, c);
+
+#ifdef BCH_WRITE_REF_DEBUG
+ if (attr == &sysfs_write_refs)
+ bch2_write_refs_to_text(out, c);
+#endif
+
+ if (attr == &sysfs_nocow_lock_table)
+ bch2_nocow_locks_to_text(out, &c->nocow_locks);
+
+ if (attr == &sysfs_disk_groups)
+ bch2_disk_groups_to_text(out, c);
+
+ return 0;
+}
+
+STORE(bch2_fs)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
+
+ if (attr == &sysfs_btree_gc_periodic) {
+ ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
+ ?: (ssize_t) size;
+
+ wake_up_process(c->gc_thread);
+ return ret;
+ }
+
+ if (attr == &sysfs_copy_gc_enabled) {
+ ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
+ ?: (ssize_t) size;
+
+ if (c->copygc_thread)
+ wake_up_process(c->copygc_thread);
+ return ret;
+ }
+
+ if (attr == &sysfs_rebalance_enabled) {
+ ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
+ ?: (ssize_t) size;
+
+ rebalance_wakeup(c);
+ return ret;
+ }
+
+ sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
+
+ sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);
+
+ /* Debugging: */
+
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
+ return -EPERM;
+
+ if (!test_bit(BCH_FS_RW, &c->flags))
+ return -EROFS;
+
+ if (attr == &sysfs_prune_cache) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
+ }
+
+ if (attr == &sysfs_btree_wakeup)
+ bch2_btree_wakeup_all(c);
+
+ if (attr == &sysfs_trigger_gc) {
+ /*
+ * Full gc is currently incompatible with btree key cache:
+ */
+#if 0
+ down_read(&c->state_lock);
+ bch2_gc(c, false, false);
+ up_read(&c->state_lock);
+#else
+ bch2_gc_gens(c);
+#endif
+ }
+
+ if (attr == &sysfs_trigger_discards)
+ bch2_do_discards(c);
+
+ if (attr == &sysfs_trigger_invalidates)
+ bch2_do_invalidates(c);
+
+#ifdef CONFIG_BCACHEFS_TESTS
+ if (attr == &sysfs_perf_test) {
+ char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
+ char *test = strsep(&p, " \t\n");
+ char *nr_str = strsep(&p, " \t\n");
+ char *threads_str = strsep(&p, " \t\n");
+ unsigned threads;
+ u64 nr;
+ int ret = -EINVAL;
+
+ if (threads_str &&
+ !(ret = kstrtouint(threads_str, 10, &threads)) &&
+ !(ret = bch2_strtoull_h(nr_str, &nr)))
+ ret = bch2_btree_perf_test(c, test, nr, threads);
+ kfree(tmp);
+
+ if (ret)
+ size = ret;
+ }
+#endif
+ return size;
+}
+SYSFS_OPS(bch2_fs);
+
+struct attribute *bch2_fs_files[] = {
+ &sysfs_minor,
+ &sysfs_btree_cache_size,
+ &sysfs_btree_write_stats,
+
+ &sysfs_promote_whole_extents,
+
+ &sysfs_compression_stats,
+
+#ifdef CONFIG_BCACHEFS_TESTS
+ &sysfs_perf_test,
+#endif
+ NULL
+};
+
+/* counters dir */
+
+SHOW(bch2_fs_counters)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
+ u64 counter = 0;
+ u64 counter_since_mount = 0;
+
+ printbuf_tabstop_push(out, 32);
+
+ #define x(t, ...) \
+ if (attr == &sysfs_##t) { \
+ counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
+ counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
+ prt_printf(out, "since mount:"); \
+ prt_tab(out); \
+ prt_human_readable_u64(out, counter_since_mount); \
+ prt_newline(out); \
+ \
+ prt_printf(out, "since filesystem creation:"); \
+ prt_tab(out); \
+ prt_human_readable_u64(out, counter); \
+ prt_newline(out); \
+ }
+ BCH_PERSISTENT_COUNTERS()
+ #undef x
+ return 0;
+}
+
+STORE(bch2_fs_counters)
+{
+ return 0;
+}
+
+SYSFS_OPS(bch2_fs_counters);
+
+struct attribute *bch2_fs_counters_files[] = {
+#define x(t, ...) \
+ &sysfs_##t,
+ BCH_PERSISTENT_COUNTERS()
+#undef x
+ NULL
+};
+
+/* internal dir - just a wrapper */
+
+SHOW(bch2_fs_internal)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
+ return bch2_fs_to_text(out, &c->kobj, attr);
+}
+
+STORE(bch2_fs_internal)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
+
+ return bch2_fs_store(&c->kobj, attr, buf, size);
+}
+SYSFS_OPS(bch2_fs_internal);
+
+struct attribute *bch2_fs_internal_files[] = {
+ &sysfs_journal_debug,
+ &sysfs_btree_updates,
+ &sysfs_btree_cache,
+ &sysfs_btree_key_cache,
+ &sysfs_new_stripes,
+ &sysfs_stripes_heap,
+ &sysfs_open_buckets,
+ &sysfs_open_buckets_partial,
+ &sysfs_write_points,
+#ifdef BCH_WRITE_REF_DEBUG
+ &sysfs_write_refs,
+#endif
+ &sysfs_nocow_lock_table,
+ &sysfs_io_timers_read,
+ &sysfs_io_timers_write,
+
+ &sysfs_trigger_gc,
+ &sysfs_trigger_discards,
+ &sysfs_trigger_invalidates,
+ &sysfs_prune_cache,
+ &sysfs_btree_wakeup,
+
+ &sysfs_gc_gens_pos,
+
+ &sysfs_copy_gc_enabled,
+ &sysfs_copy_gc_wait,
+
+ &sysfs_rebalance_enabled,
+ &sysfs_rebalance_work,
+ sysfs_pd_controller_files(rebalance),
+
+ &sysfs_moving_ctxts,
+
+ &sysfs_internal_uuid,
+
+ &sysfs_disk_groups,
+ NULL
+};
+
+/* options */
+
+SHOW(bch2_fs_opts_dir)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
+ const struct bch_option *opt = container_of(attr, struct bch_option, attr);
+ int id = opt - bch2_opt_table;
+ u64 v = bch2_opt_get_by_id(&c->opts, id);
+
+ bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
+ prt_char(out, '\n');
+
+ return 0;
+}
+
+STORE(bch2_fs_opts_dir)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
+ const struct bch_option *opt = container_of(attr, struct bch_option, attr);
+ int ret, id = opt - bch2_opt_table;
+ char *tmp;
+ u64 v;
+
+ /*
+ * We don't need to take c->writes for correctness, but it eliminates an
+ * unsightly error message in the dmesg log when we're RO:
+ */
+ if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
+ return -EROFS;
+
+ tmp = kstrdup(buf, GFP_KERNEL);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
+ kfree(tmp);
+
+ if (ret < 0)
+ goto err;
+
+ ret = bch2_opt_check_may_set(c, id, v);
+ if (ret < 0)
+ goto err;
+
+ bch2_opt_set_sb(c, opt, v);
+ bch2_opt_set_by_id(&c->opts, id, v);
+
+ if ((id == Opt_background_target ||
+ id == Opt_background_compression) && v) {
+ bch2_rebalance_add_work(c, S64_MAX);
+ rebalance_wakeup(c);
+ }
+
+ ret = size;
+err:
+ bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
+ return ret;
+}
+SYSFS_OPS(bch2_fs_opts_dir);
+
+struct attribute *bch2_fs_opts_dir_files[] = { NULL };
+
+int bch2_opts_create_sysfs_files(struct kobject *kobj)
+{
+ const struct bch_option *i;
+ int ret;
+
+ for (i = bch2_opt_table;
+ i < bch2_opt_table + bch2_opts_nr;
+ i++) {
+ if (!(i->flags & OPT_FS))
+ continue;
+
+ ret = sysfs_create_file(kobj, &i->attr);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* time stats */
+
+SHOW(bch2_fs_time_stats)
+{
+ struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
+
+#define x(name) \
+ if (attr == &sysfs_time_stat_##name) \
+ bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
+ BCH_TIME_STATS()
+#undef x
+
+ return 0;
+}
+
+STORE(bch2_fs_time_stats)
+{
+ return size;
+}
+SYSFS_OPS(bch2_fs_time_stats);
+
+struct attribute *bch2_fs_time_stats_files[] = {
+#define x(name) \
+ &sysfs_time_stat_##name,
+ BCH_TIME_STATS()
+#undef x
+ NULL
+};
+
+static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+ unsigned i, nr[BCH_DATA_NR];
+
+ memset(nr, 0, sizeof(nr));
+
+ for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
+ nr[c->open_buckets[i].data_type]++;
+
+ printbuf_tabstop_push(out, 8);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+
+ prt_tab(out);
+ prt_str(out, "buckets");
+ prt_tab_rjust(out);
+ prt_str(out, "sectors");
+ prt_tab_rjust(out);
+ prt_str(out, "fragmented");
+ prt_tab_rjust(out);
+ prt_newline(out);
+
+ for (i = 0; i < BCH_DATA_NR; i++) {
+ prt_str(out, bch2_data_types[i]);
+ prt_tab(out);
+ prt_u64(out, stats.d[i].buckets);
+ prt_tab_rjust(out);
+ prt_u64(out, stats.d[i].sectors);
+ prt_tab_rjust(out);
+ prt_u64(out, stats.d[i].fragmented);
+ prt_tab_rjust(out);
+ prt_newline(out);
+ }
+
+ prt_str(out, "ec");
+ prt_tab(out);
+ prt_u64(out, stats.buckets_ec);
+ prt_tab_rjust(out);
+ prt_newline(out);
+
+ prt_newline(out);
+
+ prt_printf(out, "reserves:");
+ prt_newline(out);
+ for (i = 0; i < BCH_WATERMARK_NR; i++) {
+ prt_str(out, bch2_watermarks[i]);
+ prt_tab(out);
+ prt_u64(out, bch2_dev_buckets_reserved(ca, i));
+ prt_tab_rjust(out);
+ prt_newline(out);
+ }
+
+ prt_newline(out);
+
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 24);
+
+ prt_str(out, "freelist_wait");
+ prt_tab(out);
+ prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
+ prt_newline(out);
+
+ prt_str(out, "open buckets allocated");
+ prt_tab(out);
+ prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
+ prt_newline(out);
+
+ prt_str(out, "open buckets this dev");
+ prt_tab(out);
+ prt_u64(out, ca->nr_open_buckets);
+ prt_newline(out);
+
+ prt_str(out, "open buckets total");
+ prt_tab(out);
+ prt_u64(out, OPEN_BUCKETS_COUNT);
+ prt_newline(out);
+
+ prt_str(out, "open_buckets_wait");
+ prt_tab(out);
+ prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
+ prt_newline(out);
+
+ prt_str(out, "open_buckets_btree");
+ prt_tab(out);
+ prt_u64(out, nr[BCH_DATA_btree]);
+ prt_newline(out);
+
+ prt_str(out, "open_buckets_user");
+ prt_tab(out);
+ prt_u64(out, nr[BCH_DATA_user]);
+ prt_newline(out);
+
+ prt_str(out, "buckets_to_invalidate");
+ prt_tab(out);
+ prt_u64(out, should_invalidate_buckets(ca, stats));
+ prt_newline(out);
+
+ prt_str(out, "btree reserve cache");
+ prt_tab(out);
+ prt_u64(out, c->btree_reserve_cache_nr);
+ prt_newline(out);
+}
+
+static const char * const bch2_rw[] = {
+ "read",
+ "write",
+ NULL
+};
+
+static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
+{
+ int rw, i;
+
+ for (rw = 0; rw < 2; rw++) {
+ prt_printf(out, "%s:\n", bch2_rw[rw]);
+
+ for (i = 1; i < BCH_DATA_NR; i++)
+ prt_printf(out, "%-12s:%12llu\n",
+ bch2_data_types[i],
+ percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
+ }
+}
+
+SHOW(bch2_dev)
+{
+ struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
+ struct bch_fs *c = ca->fs;
+
+ sysfs_printf(uuid, "%pU\n", ca->uuid.b);
+
+ sysfs_print(bucket_size, bucket_bytes(ca));
+ sysfs_print(first_bucket, ca->mi.first_bucket);
+ sysfs_print(nbuckets, ca->mi.nbuckets);
+ sysfs_print(durability, ca->mi.durability);
+ sysfs_print(discard, ca->mi.discard);
+
+ if (attr == &sysfs_label) {
+ if (ca->mi.group) {
+ mutex_lock(&c->sb_lock);
+ bch2_disk_path_to_text(out, c->disk_sb.sb,
+ ca->mi.group - 1);
+ mutex_unlock(&c->sb_lock);
+ }
+
+ prt_char(out, '\n');
+ }
+
+ if (attr == &sysfs_has_data) {
+ prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
+ prt_char(out, '\n');
+ }
+
+ if (attr == &sysfs_state_rw) {
+ prt_string_option(out, bch2_member_states, ca->mi.state);
+ prt_char(out, '\n');
+ }
+
+ if (attr == &sysfs_iodone)
+ dev_iodone_to_text(out, ca);
+
+ sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
+ sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
+
+ if (attr == &sysfs_io_latency_stats_read)
+ bch2_time_stats_to_text(out, &ca->io_latency[READ]);
+
+ if (attr == &sysfs_io_latency_stats_write)
+ bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);
+
+ sysfs_printf(congested, "%u%%",
+ clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
+ * 100 / CONGESTED_MAX);
+
+ if (attr == &sysfs_alloc_debug)
+ dev_alloc_debug_to_text(out, ca);
+
+ return 0;
+}
+
+STORE(bch2_dev)
+{
+ struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
+ struct bch_fs *c = ca->fs;
+ struct bch_member *mi;
+
+ if (attr == &sysfs_discard) {
+ bool v = strtoul_or_return(buf);
+
+ mutex_lock(&c->sb_lock);
+ mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+
+ if (v != BCH_MEMBER_DISCARD(mi)) {
+ SET_BCH_MEMBER_DISCARD(mi, v);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (attr == &sysfs_durability) {
+ u64 v = strtoul_or_return(buf);
+
+ mutex_lock(&c->sb_lock);
+ mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+
+ if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
+ SET_BCH_MEMBER_DURABILITY(mi, v + 1);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+ }
+
+ if (attr == &sysfs_label) {
+ char *tmp;
+ int ret;
+
+ tmp = kstrdup(buf, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = bch2_dev_group_set(c, ca, strim(tmp));
+ kfree(tmp);
+ if (ret)
+ return ret;
+ }
+
+ return size;
+}
+SYSFS_OPS(bch2_dev);
+
+struct attribute *bch2_dev_files[] = {
+ &sysfs_uuid,
+ &sysfs_bucket_size,
+ &sysfs_first_bucket,
+ &sysfs_nbuckets,
+ &sysfs_durability,
+
+ /* settings: */
+ &sysfs_discard,
+ &sysfs_state_rw,
+ &sysfs_label,
+
+ &sysfs_has_data,
+ &sysfs_iodone,
+
+ &sysfs_io_latency_read,
+ &sysfs_io_latency_write,
+ &sysfs_io_latency_stats_read,
+ &sysfs_io_latency_stats_write,
+ &sysfs_congested,
+
+ /* debug: */
+ &sysfs_alloc_debug,
+ NULL
+};
+
+#endif /* NO_BCACHEFS_SYSFS */
diff --git a/fs/bcachefs/sysfs.h b/fs/bcachefs/sysfs.h
new file mode 100644
index 000000000000..222cd5062702
--- /dev/null
+++ b/fs/bcachefs/sysfs.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SYSFS_H_
+#define _BCACHEFS_SYSFS_H_
+
+#include <linux/sysfs.h>
+
+#ifndef NO_BCACHEFS_SYSFS
+
+struct attribute;
+struct sysfs_ops;
+
+extern struct attribute *bch2_fs_files[];
+extern struct attribute *bch2_fs_counters_files[];
+extern struct attribute *bch2_fs_internal_files[];
+extern struct attribute *bch2_fs_opts_dir_files[];
+extern struct attribute *bch2_fs_time_stats_files[];
+extern struct attribute *bch2_dev_files[];
+
+extern const struct sysfs_ops bch2_fs_sysfs_ops;
+extern const struct sysfs_ops bch2_fs_counters_sysfs_ops;
+extern const struct sysfs_ops bch2_fs_internal_sysfs_ops;
+extern const struct sysfs_ops bch2_fs_opts_dir_sysfs_ops;
+extern const struct sysfs_ops bch2_fs_time_stats_sysfs_ops;
+extern const struct sysfs_ops bch2_dev_sysfs_ops;
+
+int bch2_opts_create_sysfs_files(struct kobject *);
+
+#else
+
+static struct attribute *bch2_fs_files[] = {};
+static struct attribute *bch2_fs_counters_files[] = {};
+static struct attribute *bch2_fs_internal_files[] = {};
+static struct attribute *bch2_fs_opts_dir_files[] = {};
+static struct attribute *bch2_fs_time_stats_files[] = {};
+static struct attribute *bch2_dev_files[] = {};
+
+static const struct sysfs_ops bch2_fs_sysfs_ops;
+static const struct sysfs_ops bch2_fs_counters_sysfs_ops;
+static const struct sysfs_ops bch2_fs_internal_sysfs_ops;
+static const struct sysfs_ops bch2_fs_opts_dir_sysfs_ops;
+static const struct sysfs_ops bch2_fs_time_stats_sysfs_ops;
+static const struct sysfs_ops bch2_dev_sysfs_ops;
+
+static inline int bch2_opts_create_sysfs_files(struct kobject *kobj) { return 0; }
+
+#endif /* NO_BCACHEFS_SYSFS */
+
+#endif /* _BCACHEFS_SYSFS_H_ */
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
new file mode 100644
index 000000000000..2fc9e60c754b
--- /dev/null
+++ b/fs/bcachefs/tests.c
@@ -0,0 +1,919 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifdef CONFIG_BCACHEFS_TESTS
+
+#include "bcachefs.h"
+#include "btree_update.h"
+#include "journal_reclaim.h"
+#include "snapshot.h"
+#include "tests.h"
+
+#include "linux/kthread.h"
+#include "linux/random.h"
+
+static void delete_test_keys(struct bch_fs *c)
+{
+ int ret;
+
+ ret = bch2_btree_delete_range(c, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX),
+ POS(0, U64_MAX),
+ 0, NULL);
+ BUG_ON(ret);
+
+ ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX),
+ POS(0, U64_MAX),
+ 0, NULL);
+ BUG_ON(ret);
+}
+
+/* unit tests */
+
+static int test_delete(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_i_cookie k;
+ int ret;
+
+ bkey_cookie_init(&k.k_i);
+ k.k.p.snapshot = U32_MAX;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
+ BTREE_ITER_INTENT);
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, &k.k_i, 0));
+ bch_err_msg(c, ret, "update error");
+ if (ret)
+ goto err;
+
+ pr_info("deleting once");
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_delete_at(trans, &iter, 0));
+ bch_err_msg(c, ret, "delete error (first)");
+ if (ret)
+ goto err;
+
+ pr_info("deleting twice");
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_delete_at(trans, &iter, 0));
+ bch_err_msg(c, ret, "delete error (second)");
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int test_delete_written(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_i_cookie k;
+ int ret;
+
+ bkey_cookie_init(&k.k_i);
+ k.k.p.snapshot = U32_MAX;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
+ BTREE_ITER_INTENT);
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, &k.k_i, 0));
+ bch_err_msg(c, ret, "update error");
+ if (ret)
+ goto err;
+
+ bch2_trans_unlock(trans);
+ bch2_journal_flush_all_pins(&c->journal);
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_delete_at(trans, &iter, 0));
+ bch_err_msg(c, ret, "delete error");
+ if (ret)
+ goto err;
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int test_iterate(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c k;
+ u64 i;
+ int ret = 0;
+
+ delete_test_keys(c);
+
+ pr_info("inserting test keys");
+
+ for (i = 0; i < nr; i++) {
+ struct bkey_i_cookie ck;
+
+ bkey_cookie_init(&ck.k_i);
+ ck.k.p.offset = i;
+ ck.k.p.snapshot = U32_MAX;
+
+ ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
+ bch_err_msg(c, ret, "insert error");
+ if (ret)
+ goto err;
+ }
+
+ pr_info("iterating forwards");
+
+ i = 0;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
+ BUG_ON(k.k->p.offset != i++);
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating forwards");
+ if (ret)
+ goto err;
+
+ BUG_ON(i != nr);
+
+ pr_info("iterating backwards");
+
+ ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, U64_MAX, U32_MAX), 0, k,
+ ({
+ BUG_ON(k.k->p.offset != --i);
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating backwards");
+ if (ret)
+ goto err;
+
+ BUG_ON(i);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int test_iterate_extents(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c k;
+ u64 i;
+ int ret = 0;
+
+ delete_test_keys(c);
+
+ pr_info("inserting test extents");
+
+ for (i = 0; i < nr; i += 8) {
+ struct bkey_i_cookie ck;
+
+ bkey_cookie_init(&ck.k_i);
+ ck.k.p.offset = i + 8;
+ ck.k.p.snapshot = U32_MAX;
+ ck.k.size = 8;
+
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
+ bch_err_msg(c, ret, "insert error");
+ if (ret)
+ goto err;
+ }
+
+ pr_info("iterating forwards");
+
+ i = 0;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
+ BUG_ON(bkey_start_offset(k.k) != i);
+ i = k.k->p.offset;
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating forwards");
+ if (ret)
+ goto err;
+
+ BUG_ON(i != nr);
+
+ pr_info("iterating backwards");
+
+ ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
+ SPOS(0, U64_MAX, U32_MAX), 0, k,
+ ({
+ BUG_ON(k.k->p.offset != i);
+ i = bkey_start_offset(k.k);
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating backwards");
+ if (ret)
+ goto err;
+
+ BUG_ON(i);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int test_iterate_slots(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c k;
+ u64 i;
+ int ret = 0;
+
+ delete_test_keys(c);
+
+ pr_info("inserting test keys");
+
+ for (i = 0; i < nr; i++) {
+ struct bkey_i_cookie ck;
+
+ bkey_cookie_init(&ck.k_i);
+ ck.k.p.offset = i * 2;
+ ck.k.p.snapshot = U32_MAX;
+
+ ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
+ bch_err_msg(c, ret, "insert error");
+ if (ret)
+ goto err;
+ }
+
+ pr_info("iterating forwards");
+
+ i = 0;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
+ BUG_ON(k.k->p.offset != i);
+ i += 2;
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating forwards");
+ if (ret)
+ goto err;
+
+ BUG_ON(i != nr * 2);
+
+ pr_info("iterating forwards by slots");
+
+ i = 0;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ BTREE_ITER_SLOTS, k, ({
+ if (i >= nr * 2)
+ break;
+
+ BUG_ON(k.k->p.offset != i);
+ BUG_ON(bkey_deleted(k.k) != (i & 1));
+
+ i++;
+ 0;
+ }));
+ if (ret < 0) {
+ bch_err_msg(c, ret, "error iterating forwards by slots");
+ goto err;
+ }
+ ret = 0;
+err:
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter = { NULL };
+ struct bkey_s_c k;
+ u64 i;
+ int ret = 0;
+
+ delete_test_keys(c);
+
+ pr_info("inserting test keys");
+
+ for (i = 0; i < nr; i += 16) {
+ struct bkey_i_cookie ck;
+
+ bkey_cookie_init(&ck.k_i);
+ ck.k.p.offset = i + 16;
+ ck.k.p.snapshot = U32_MAX;
+ ck.k.size = 8;
+
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
+ bch_err_msg(c, ret, "insert error");
+ if (ret)
+ goto err;
+ }
+
+ pr_info("iterating forwards");
+
+ i = 0;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k, ({
+ BUG_ON(bkey_start_offset(k.k) != i + 8);
+ BUG_ON(k.k->size != 8);
+ i += 16;
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating forwards");
+ if (ret)
+ goto err;
+
+ BUG_ON(i != nr);
+
+ pr_info("iterating forwards by slots");
+
+ i = 0;
+
+ ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ BTREE_ITER_SLOTS, k, ({
+ if (i == nr)
+ break;
+ BUG_ON(bkey_deleted(k.k) != !(i % 16));
+
+ BUG_ON(bkey_start_offset(k.k) != i);
+ BUG_ON(k.k->size != 8);
+ i = k.k->p.offset;
+ 0;
+ }));
+ bch_err_msg(c, ret, "error iterating forwards by slots");
+ if (ret)
+ goto err;
+ ret = 0;
+err:
+ bch2_trans_put(trans);
+ return ret;
+}
+
+/*
+ * XXX: we really want to make sure we've got a btree with depth > 0 for these
+ * tests
+ */
+static int test_peek_end(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), 0);
+
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+ BUG_ON(k.k);
+
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+ BUG_ON(k.k);
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return 0;
+}
+
+static int test_peek_end_extents(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
+ SPOS(0, 0, U32_MAX), 0);
+
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+ BUG_ON(k.k);
+
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+ BUG_ON(k.k);
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return 0;
+}
+
+/* extent unit tests */
+
+static u64 test_version;
+
+static int insert_test_extent(struct bch_fs *c,
+ u64 start, u64 end)
+{
+ struct bkey_i_cookie k;
+ int ret;
+
+ bkey_cookie_init(&k.k_i);
+ k.k_i.k.p.offset = end;
+ k.k_i.k.p.snapshot = U32_MAX;
+ k.k_i.k.size = end - start;
+ k.k_i.k.version.lo = test_version++;
+
+ ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
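+/*
+ * Insert two extents, the second overlapping the first, to exercise the
+ * extent overwrite path, then delete the test keys:
+ */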
+static int __test_extent_overwrite(struct bch_fs *c,
+ u64 e1_start, u64 e1_end,
+ u64 e2_start, u64 e2_end)
+{
+ int ret;
+
+ ret = insert_test_extent(c, e1_start, e1_end) ?:
+ insert_test_extent(c, e2_start, e2_end);
+
+ delete_test_keys(c);
+ return ret;
+}
+
+static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
+{
+ return __test_extent_overwrite(c, 0, 64, 0, 32) ?:
+ __test_extent_overwrite(c, 8, 64, 0, 32);
+}
+
+static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
+{
+ return __test_extent_overwrite(c, 0, 64, 32, 64) ?:
+ __test_extent_overwrite(c, 0, 64, 32, 72);
+}
+
+static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
+{
+ return __test_extent_overwrite(c, 0, 64, 32, 40);
+}
+
+static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
+{
+ return __test_extent_overwrite(c, 32, 64, 0, 64) ?:
+ __test_extent_overwrite(c, 32, 64, 0, 128) ?:
+ __test_extent_overwrite(c, 32, 64, 32, 64) ?:
+ __test_extent_overwrite(c, 32, 64, 32, 128);
+}
+
+static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
+{
+ struct bkey_i_cookie k;
+ int ret;
+
+ bkey_cookie_init(&k.k_i);
+ k.k_i.k.p.inode = inum;
+ k.k_i.k.p.offset = start + len;
+ k.k_i.k.p.snapshot = snapid;
+ k.k_i.k.size = len;
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
+{
+ return insert_test_overlapping_extent(c, inum, 0, 16, U32_MAX - 2) ?: /* overwrite entire */
+ insert_test_overlapping_extent(c, inum, 2, 8, U32_MAX - 2) ?:
+ insert_test_overlapping_extent(c, inum, 4, 4, U32_MAX) ?:
+ insert_test_overlapping_extent(c, inum, 32, 8, U32_MAX - 2) ?: /* overwrite front/back */
+ insert_test_overlapping_extent(c, inum, 36, 8, U32_MAX) ?:
+ insert_test_overlapping_extent(c, inum, 60, 8, U32_MAX - 2) ?:
+ insert_test_overlapping_extent(c, inum, 64, 8, U32_MAX);
+}
+
+/* snapshot unit tests */
+
+/* Test skipping over keys in unrelated snapshots: */
+static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
+{
+ struct btree_trans *trans;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_i_cookie cookie;
+ int ret;
+
+ bkey_cookie_init(&cookie.k_i);
+ cookie.k.p.snapshot = snapid_hi;
+ ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
+ if (ret)
+ return ret;
+
+ trans = bch2_trans_get(c);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
+ SPOS(0, 0, snapid_lo), 0);
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
+
+ BUG_ON(k.k->p.snapshot != U32_MAX);
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int test_snapshots(struct bch_fs *c, u64 nr)
+{
+ struct bkey_i_cookie cookie;
+ u32 snapids[2];
+ u32 snapid_subvols[2] = { 1, 1 };
+ int ret;
+
+ bkey_cookie_init(&cookie.k_i);
+ cookie.k.p.snapshot = U32_MAX;
+ ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
+ if (ret)
+ return ret;
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_snapshot_node_create(trans, U32_MAX,
+ snapids,
+ snapid_subvols,
+ 2));
+ if (ret)
+ return ret;
+
+ if (snapids[0] > snapids[1])
+ swap(snapids[0], snapids[1]);
+
+ ret = test_snapshot_filter(c, snapids[0], snapids[1]);
+ bch_err_msg(c, ret, "from test_snapshot_filter");
+ return ret;
+}
+
+/* perf tests */
+
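+/* Uniformly random 64-bit values, used as key offsets by the randomized tests: */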
+static u64 test_rand(void)
+{
+ u64 v;
+
+ get_random_bytes(&v, sizeof(v));
+ return v;
+}
+
+static int rand_insert(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bkey_i_cookie k;
+ int ret = 0;
+ u64 i;
+
+ for (i = 0; i < nr; i++) {
+ bkey_cookie_init(&k.k_i);
+ k.k.p.offset = test_rand();
+ k.k.p.snapshot = U32_MAX;
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
+ if (ret)
+ break;
+ }
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int rand_insert_multi(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct bkey_i_cookie k[8];
+ int ret = 0;
+ unsigned j;
+ u64 i;
+
+ for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
+ for (j = 0; j < ARRAY_SIZE(k); j++) {
+ bkey_cookie_init(&k[j].k_i);
+ k[j].k.p.offset = test_rand();
+ k[j].k.p.snapshot = U32_MAX;
+ }
+
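+		/* Commit all eight inserts in a single transaction: */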
+ ret = commit_do(trans, NULL, NULL, 0,
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
+ bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
+ if (ret)
+ break;
+ }
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int rand_lookup(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+ u64 i;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), 0);
+
+ for (i = 0; i < nr; i++) {
+ bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
+
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ ret = bkey_err(k);
+ if (ret)
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int rand_mixed_trans(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i_cookie *cookie,
+ u64 i, u64 pos)
+{
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
+
+ k = bch2_btree_iter_peek(iter);
+ ret = bkey_err(k);
+ bch_err_msg(trans->c, ret, "lookup error");
+ if (ret)
+ return ret;
+
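+	/* Every fourth iteration, overwrite the key we just looked up (if any): */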
+ if (!(i & 3) && k.k) {
+ bkey_cookie_init(&cookie->k_i);
+ cookie->k.p = iter->pos;
+ ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
+ }
+
+ return ret;
+}
+
+static int rand_mixed(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_i_cookie cookie;
+ int ret = 0;
+ u64 i, rand;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), 0);
+
+ for (i = 0; i < nr; i++) {
+ rand = test_rand();
+ ret = commit_do(trans, NULL, NULL, 0,
+ rand_mixed_trans(trans, &iter, &cookie, i, rand));
+ if (ret)
+ break;
+ }
+
+ bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int __do_delete(struct btree_trans *trans, struct bpos pos)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
+ BTREE_ITER_INTENT);
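+	/* Peek finds the first key at or after pos; nothing to delete if there is none: */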
+ k = bch2_btree_iter_peek(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ if (!k.k)
+ goto err;
+
+ ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+static int rand_delete(struct bch_fs *c, u64 nr)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ int ret = 0;
+ u64 i;
+
+ for (i = 0; i < nr; i++) {
+ struct bpos pos = SPOS(0, test_rand(), U32_MAX);
+
+ ret = commit_do(trans, NULL, NULL, 0,
+ __do_delete(trans, pos));
+ if (ret)
+ break;
+ }
+
+ bch2_trans_put(trans);
+ return ret;
+}
+
+static int seq_insert(struct bch_fs *c, u64 nr)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bkey_i_cookie insert;
+
+ bkey_cookie_init(&insert.k_i);
+
+ return bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX),
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
+ NULL, NULL, 0, ({
+ if (iter.pos.offset >= nr)
+ break;
+ insert.k.p = iter.pos;
+ bch2_trans_update(trans, &iter, &insert.k_i, 0);
+ })));
+}
+
+static int seq_lookup(struct bch_fs *c, u64 nr)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+
+ return bch2_trans_run(c,
+ for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+ 0, k,
+ 0));
+}
+
+static int seq_overwrite(struct bch_fs *c, u64 nr)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+
+ return bch2_trans_run(c,
+ for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX),
+ BTREE_ITER_INTENT, k,
+ NULL, NULL, 0, ({
+ struct bkey_i_cookie u;
+
+ bkey_reassemble(&u.k_i, k);
+ bch2_trans_update(trans, &iter, &u.k_i, 0);
+ })));
+}
+
+static int seq_delete(struct bch_fs *c, u64 nr)
+{
+ return bch2_btree_delete_range(c, BTREE_ID_xattrs,
+ SPOS(0, 0, U32_MAX),
+ POS(0, U64_MAX),
+ 0, NULL);
+}
+
+typedef int (*perf_test_fn)(struct bch_fs *, u64);
+
+struct test_job {
+ struct bch_fs *c;
+ u64 nr;
+ unsigned nr_threads;
+ perf_test_fn fn;
+
+ atomic_t ready;
+ wait_queue_head_t ready_wait;
+
+ atomic_t done;
+ struct completion done_completion;
+
+ u64 start;
+ u64 finish;
+ int ret;
+};
+
+static int btree_perf_test_thread(void *data)
+{
+ struct test_job *j = data;
+ int ret;
+
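+	/* The last thread to become ready starts the clock and wakes the others: */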
+ if (atomic_dec_and_test(&j->ready)) {
+ wake_up(&j->ready_wait);
+ j->start = sched_clock();
+ } else {
+ wait_event(j->ready_wait, !atomic_read(&j->ready));
+ }
+
+ ret = j->fn(j->c, div64_u64(j->nr, j->nr_threads));
+ if (ret) {
+ bch_err(j->c, "%ps: error %s", j->fn, bch2_err_str(ret));
+ j->ret = ret;
+ }
+
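+	/* The last thread to finish stops the clock and signals completion: */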
+ if (atomic_dec_and_test(&j->done)) {
+ j->finish = sched_clock();
+ complete(&j->done_completion);
+ }
+
+ return 0;
+}
+
+int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
+ u64 nr, unsigned nr_threads)
+{
+ struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
+ char name_buf[20];
+ struct printbuf nr_buf = PRINTBUF;
+ struct printbuf per_sec_buf = PRINTBUF;
+ unsigned i;
+ u64 time;
+
+ atomic_set(&j.ready, nr_threads);
+ init_waitqueue_head(&j.ready_wait);
+
+ atomic_set(&j.done, nr_threads);
+ init_completion(&j.done_completion);
+
+#define perf_test(_test) \
+ if (!strcmp(testname, #_test)) j.fn = _test
+
+ perf_test(rand_insert);
+ perf_test(rand_insert_multi);
+ perf_test(rand_lookup);
+ perf_test(rand_mixed);
+ perf_test(rand_delete);
+
+ perf_test(seq_insert);
+ perf_test(seq_lookup);
+ perf_test(seq_overwrite);
+ perf_test(seq_delete);
+
+	/* unit tests, not perf tests: */
+ perf_test(test_delete);
+ perf_test(test_delete_written);
+ perf_test(test_iterate);
+ perf_test(test_iterate_extents);
+ perf_test(test_iterate_slots);
+ perf_test(test_iterate_slots_extents);
+ perf_test(test_peek_end);
+ perf_test(test_peek_end_extents);
+
+ perf_test(test_extent_overwrite_front);
+ perf_test(test_extent_overwrite_back);
+ perf_test(test_extent_overwrite_middle);
+ perf_test(test_extent_overwrite_all);
+ perf_test(test_extent_create_overlapping);
+
+ perf_test(test_snapshots);
+
+ if (!j.fn) {
+ pr_err("unknown test %s", testname);
+ return -EINVAL;
+ }
+
+ //pr_info("running test %s:", testname);
+
+ if (nr_threads == 1)
+ btree_perf_test_thread(&j);
+ else
+ for (i = 0; i < nr_threads; i++)
+ kthread_run(btree_perf_test_thread, &j,
+ "bcachefs perf test[%u]", i);
+
+ while (wait_for_completion_interruptible(&j.done_completion))
+ ;
+
+ time = j.finish - j.start;
+
+ scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
+ prt_human_readable_u64(&nr_buf, nr);
+ prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
+ printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
+ name_buf, nr_buf.buf, nr_threads,
+ div_u64(time, NSEC_PER_SEC),
+ div_u64(time * nr_threads, nr),
+ per_sec_buf.buf);
+ printbuf_exit(&per_sec_buf);
+ printbuf_exit(&nr_buf);
+ return j.ret;
+}
+
+#endif /* CONFIG_BCACHEFS_TESTS */
diff --git a/fs/bcachefs/tests.h b/fs/bcachefs/tests.h
new file mode 100644
index 000000000000..c73b18aea7e0
--- /dev/null
+++ b/fs/bcachefs/tests.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_TEST_H
+#define _BCACHEFS_TEST_H
+
+struct bch_fs;
+
+#ifdef CONFIG_BCACHEFS_TESTS
+
+int bch2_btree_perf_test(struct bch_fs *, const char *, u64, unsigned);
+
+#else
+
+#endif /* CONFIG_BCACHEFS_TESTS */
+
+#endif /* _BCACHEFS_TEST_H */
diff --git a/fs/bcachefs/trace.c b/fs/bcachefs/trace.c
new file mode 100644
index 000000000000..33efa6005c6f
--- /dev/null
+++ b/fs/bcachefs/trace.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bcachefs.h"
+#include "alloc_types.h"
+#include "buckets.h"
+#include "btree_cache.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
+#include "btree_update_interior.h"
+#include "keylist.h"
+#include "opts.h"
+#include "six.h"
+
+#include <linux/blktrace_api.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
new file mode 100644
index 000000000000..19264492151b
--- /dev/null
+++ b/fs/bcachefs/trace.h
@@ -0,0 +1,1284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bcachefs
+
+#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BCACHEFS_H
+
+#include <linux/tracepoint.h>
+
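+/* Helpers for embedding a struct bpos (inode:offset:snapshot) in tracepoint entries: */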
+#define TRACE_BPOS_entries(name) \
+ __field(u64, name##_inode ) \
+ __field(u64, name##_offset ) \
+ __field(u32, name##_snapshot )
+
+#define TRACE_BPOS_assign(dst, src) \
+ __entry->dst##_inode = (src).inode; \
+ __entry->dst##_offset = (src).offset; \
+ __entry->dst##_snapshot = (src).snapshot
+
+DECLARE_EVENT_CLASS(bpos,
+ TP_PROTO(const struct bpos *p),
+ TP_ARGS(p),
+
+ TP_STRUCT__entry(
+ TRACE_BPOS_entries(p)
+ ),
+
+ TP_fast_assign(
+ TRACE_BPOS_assign(p, *p);
+ ),
+
+ TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
+);
+
+DECLARE_EVENT_CLASS(bkey,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k),
+
+ TP_STRUCT__entry(
+ __string(k, k )
+ ),
+
+ TP_fast_assign(
+ __assign_str(k, k);
+ ),
+
+ TP_printk("%s", __get_str(k))
+);
+
+DECLARE_EVENT_CLASS(btree_node,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u8, level )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->level = b->c.level;
+ __entry->btree_id = b->c.btree_id;
+ TRACE_BPOS_assign(pos, b->key.k.p);
+ ),
+
+ TP_printk("%d,%d %u %s %llu:%llu:%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->level,
+ bch2_btree_ids[__entry->btree_id],
+ __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+);
+
+DECLARE_EVENT_CLASS(bch_fs,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ ),
+
+ TP_printk("%d,%d", MAJOR(__entry->dev), MINOR(__entry->dev))
+);
+
+DECLARE_EVENT_CLASS(bio,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev ? bio_dev(bio) : 0;
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector, __entry->nr_sector)
+);
+
+/* super-io.c: */
+TRACE_EVENT(write_super,
+ TP_PROTO(struct bch_fs *c, unsigned long ip),
+ TP_ARGS(c, ip),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(unsigned long, ip )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->ip = ip;
+ ),
+
+ TP_printk("%d,%d for %pS",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (void *) __entry->ip)
+);
+
+/* io.c: */
+
+DEFINE_EVENT(bio, read_promote,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+TRACE_EVENT(read_nopromote,
+ TP_PROTO(struct bch_fs *c, int ret),
+ TP_ARGS(c, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __array(char, ret, 32 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
+ ),
+
+ TP_printk("%d,%d ret %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ret)
+);
+
+DEFINE_EVENT(bio, read_bounce,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bio, read_split,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bio, read_retry,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+DEFINE_EVENT(bio, read_reuse_race,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+/* Journal */
+
+DEFINE_EVENT(bch_fs, journal_full,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, journal_entry_full,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+DEFINE_EVENT(bio, journal_write,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio)
+);
+
+TRACE_EVENT(journal_reclaim_start,
+ TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
+ u64 min_nr, u64 min_key_cache,
+ u64 prereserved, u64 prereserved_total,
+ u64 btree_cache_dirty, u64 btree_cache_total,
+ u64 btree_key_cache_dirty, u64 btree_key_cache_total),
+ TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
+ btree_cache_dirty, btree_cache_total,
+ btree_key_cache_dirty, btree_key_cache_total),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(bool, direct )
+ __field(bool, kicked )
+ __field(u64, min_nr )
+ __field(u64, min_key_cache )
+ __field(u64, prereserved )
+ __field(u64, prereserved_total )
+ __field(u64, btree_cache_dirty )
+ __field(u64, btree_cache_total )
+ __field(u64, btree_key_cache_dirty )
+ __field(u64, btree_key_cache_total )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->direct = direct;
+ __entry->kicked = kicked;
+ __entry->min_nr = min_nr;
+ __entry->min_key_cache = min_key_cache;
+ __entry->prereserved = prereserved;
+ __entry->prereserved_total = prereserved_total;
+ __entry->btree_cache_dirty = btree_cache_dirty;
+ __entry->btree_cache_total = btree_cache_total;
+ __entry->btree_key_cache_dirty = btree_key_cache_dirty;
+ __entry->btree_key_cache_total = btree_key_cache_total;
+ ),
+
+ TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->direct,
+ __entry->kicked,
+ __entry->min_nr,
+ __entry->min_key_cache,
+ __entry->prereserved,
+ __entry->prereserved_total,
+ __entry->btree_cache_dirty,
+ __entry->btree_cache_total,
+ __entry->btree_key_cache_dirty,
+ __entry->btree_key_cache_total)
+);
+
+TRACE_EVENT(journal_reclaim_finish,
+ TP_PROTO(struct bch_fs *c, u64 nr_flushed),
+ TP_ARGS(c, nr_flushed),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, nr_flushed )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->nr_flushed = nr_flushed;
+ ),
+
+ TP_printk("%d,%d flushed %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->nr_flushed)
+);
+
+/* bset.c: */
+
+DEFINE_EVENT(bpos, bkey_pack_pos_fail,
+ TP_PROTO(const struct bpos *p),
+ TP_ARGS(p)
+);
+
+/* Btree cache: */
+
+TRACE_EVENT(btree_cache_scan,
+ TP_PROTO(long nr_to_scan, long can_free, long ret),
+ TP_ARGS(nr_to_scan, can_free, ret),
+
+ TP_STRUCT__entry(
+ __field(long, nr_to_scan )
+ __field(long, can_free )
+ __field(long, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->nr_to_scan = nr_to_scan;
+ __entry->can_free = can_free;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("scanned for %li nodes, can free %li, ret %li",
+ __entry->nr_to_scan, __entry->can_free, __entry->ret)
+);
+
+DEFINE_EVENT(btree_node, btree_cache_reap,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+/* Btree */
+
+DEFINE_EVENT(btree_node, btree_node_read,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+TRACE_EVENT(btree_node_write,
+ TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
+ TP_ARGS(b, bytes, sectors),
+
+ TP_STRUCT__entry(
+ __field(enum btree_node_type, type)
+ __field(unsigned, bytes )
+ __field(unsigned, sectors )
+ ),
+
+ TP_fast_assign(
+ __entry->type = btree_node_type(b);
+ __entry->bytes = bytes;
+ __entry->sectors = sectors;
+ ),
+
+ TP_printk("bkey type %u bytes %u sectors %u",
+		  __entry->type, __entry->bytes, __entry->sectors)
+);
+
+DEFINE_EVENT(btree_node, btree_node_alloc,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_free,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+TRACE_EVENT(btree_reserve_get_fail,
+ TP_PROTO(const char *trans_fn,
+ unsigned long caller_ip,
+ size_t required,
+ int ret),
+ TP_ARGS(trans_fn, caller_ip, required, ret),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(size_t, required )
+ __array(char, ret, 32 )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->required = required;
+ strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
+ ),
+
+ TP_printk("%s %pS required %zu ret %s",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ __entry->required,
+ __entry->ret)
+);
+
+DEFINE_EVENT(btree_node, btree_node_compact,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_merge,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_split,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_rewrite,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(btree_node, btree_node_set_root,
+ TP_PROTO(struct bch_fs *c, struct btree *b),
+ TP_ARGS(c, b)
+);
+
+TRACE_EVENT(btree_path_relock_fail,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path,
+ unsigned level),
+ TP_ARGS(trans, caller_ip, path, level),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u8, level )
+ TRACE_BPOS_entries(pos)
+ __array(char, node, 24 )
+ __field(u8, self_read_count )
+ __field(u8, self_intent_count)
+ __field(u8, read_count )
+ __field(u8, intent_count )
+ __field(u32, iter_lock_seq )
+ __field(u32, node_lock_seq )
+ ),
+
+ TP_fast_assign(
+ struct btree *b = btree_path_node(path, level);
+ struct six_lock_count c;
+
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = path->btree_id;
+ __entry->level = path->level;
+ TRACE_BPOS_assign(pos, path->pos);
+
+		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
+ __entry->self_read_count = c.n[SIX_LOCK_read];
+ __entry->self_intent_count = c.n[SIX_LOCK_intent];
+
+ if (IS_ERR(b)) {
+ strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
+ } else {
+ c = six_lock_counts(&path->l[level].b->c.lock);
+ __entry->read_count = c.n[SIX_LOCK_read];
+ __entry->intent_count = c.n[SIX_LOCK_intent];
+ scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+ }
+ __entry->iter_lock_seq = path->l[level].lock_seq;
+ __entry->node_lock_seq = is_btree_node(path, level)
+ ? six_lock_seq(&path->l[level].b->c.lock)
+ : 0;
+ ),
+
+ TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_ids[__entry->btree_id],
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->level,
+ __entry->node,
+ __entry->self_read_count,
+ __entry->self_intent_count,
+ __entry->read_count,
+ __entry->intent_count,
+ __entry->iter_lock_seq,
+ __entry->node_lock_seq)
+);
+
+TRACE_EVENT(btree_path_upgrade_fail,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path,
+ unsigned level),
+ TP_ARGS(trans, caller_ip, path, level),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u8, level )
+ TRACE_BPOS_entries(pos)
+ __field(u8, locked )
+ __field(u8, self_read_count )
+ __field(u8, self_intent_count)
+ __field(u8, read_count )
+ __field(u8, intent_count )
+ __field(u32, iter_lock_seq )
+ __field(u32, node_lock_seq )
+ ),
+
+ TP_fast_assign(
+ struct six_lock_count c;
+
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = path->btree_id;
+ __entry->level = level;
+ TRACE_BPOS_assign(pos, path->pos);
+ __entry->locked = btree_node_locked(path, level);
+
+		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
+ __entry->self_read_count = c.n[SIX_LOCK_read];
+ __entry->self_intent_count = c.n[SIX_LOCK_intent];
+ c = six_lock_counts(&path->l[level].b->c.lock);
+ __entry->read_count = c.n[SIX_LOCK_read];
+ __entry->intent_count = c.n[SIX_LOCK_intent];
+ __entry->iter_lock_seq = path->l[level].lock_seq;
+ __entry->node_lock_seq = is_btree_node(path, level)
+ ? six_lock_seq(&path->l[level].b->c.lock)
+ : 0;
+ ),
+
+ TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_ids[__entry->btree_id],
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->level,
+ __entry->locked,
+ __entry->self_read_count,
+ __entry->self_intent_count,
+ __entry->read_count,
+ __entry->intent_count,
+ __entry->iter_lock_seq,
+ __entry->node_lock_seq)
+);
+
+/* Garbage collection */
+
+DEFINE_EVENT(bch_fs, gc_gens_start,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, gc_gens_end,
+ TP_PROTO(struct bch_fs *c),
+ TP_ARGS(c)
+);
+
+/* Allocator */
+
+DECLARE_EVENT_CLASS(bucket_alloc,
+ TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
+ u64 bucket,
+ u64 free,
+ u64 avail,
+ u64 copygc_wait_amount,
+ s64 copygc_waiting_for,
+ struct bucket_alloc_state *s,
+ bool nonblocking,
+ const char *err),
+ TP_ARGS(ca, alloc_reserve, bucket, free, avail,
+ copygc_wait_amount, copygc_waiting_for,
+ s, nonblocking, err),
+
+ TP_STRUCT__entry(
+ __field(u8, dev )
+ __array(char, reserve, 16 )
+ __field(u64, bucket )
+ __field(u64, free )
+ __field(u64, avail )
+ __field(u64, copygc_wait_amount )
+ __field(s64, copygc_waiting_for )
+ __field(u64, seen )
+ __field(u64, open )
+ __field(u64, need_journal_commit )
+ __field(u64, nouse )
+ __field(bool, nonblocking )
+ __field(u64, nocow )
+ __array(char, err, 32 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = ca->dev_idx;
+ strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
+ __entry->bucket = bucket;
+ __entry->free = free;
+ __entry->avail = avail;
+ __entry->copygc_wait_amount = copygc_wait_amount;
+ __entry->copygc_waiting_for = copygc_waiting_for;
+ __entry->seen = s->buckets_seen;
+ __entry->open = s->skipped_open;
+ __entry->need_journal_commit = s->skipped_need_journal_commit;
+ __entry->nouse = s->skipped_nouse;
+ __entry->nonblocking = nonblocking;
+ __entry->nocow = s->skipped_nocow;
+ strscpy(__entry->err, err, sizeof(__entry->err));
+ ),
+
+ TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
+ __entry->reserve,
+ __entry->dev,
+ __entry->bucket,
+ __entry->free,
+ __entry->avail,
+ __entry->copygc_wait_amount,
+ __entry->copygc_waiting_for,
+ __entry->seen,
+ __entry->open,
+ __entry->need_journal_commit,
+ __entry->nouse,
+ __entry->nocow,
+ __entry->nonblocking,
+ __entry->err)
+);
+
+DEFINE_EVENT(bucket_alloc, bucket_alloc,
+ TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
+ u64 bucket,
+ u64 free,
+ u64 avail,
+ u64 copygc_wait_amount,
+ s64 copygc_waiting_for,
+ struct bucket_alloc_state *s,
+ bool nonblocking,
+ const char *err),
+ TP_ARGS(ca, alloc_reserve, bucket, free, avail,
+ copygc_wait_amount, copygc_waiting_for,
+ s, nonblocking, err)
+);
+
+DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
+ TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
+ u64 bucket,
+ u64 free,
+ u64 avail,
+ u64 copygc_wait_amount,
+ s64 copygc_waiting_for,
+ struct bucket_alloc_state *s,
+ bool nonblocking,
+ const char *err),
+ TP_ARGS(ca, alloc_reserve, bucket, free, avail,
+ copygc_wait_amount, copygc_waiting_for,
+ s, nonblocking, err)
+);
+
+TRACE_EVENT(discard_buckets,
+ TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
+ u64 need_journal_commit, u64 discarded, const char *err),
+ TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, seen )
+ __field(u64, open )
+ __field(u64, need_journal_commit )
+ __field(u64, discarded )
+ __array(char, err, 16 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->seen = seen;
+ __entry->open = open;
+ __entry->need_journal_commit = need_journal_commit;
+ __entry->discarded = discarded;
+ strscpy(__entry->err, err, sizeof(__entry->err));
+ ),
+
+	TP_printk("%d,%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->seen,
+ __entry->open,
+ __entry->need_journal_commit,
+ __entry->discarded,
+ __entry->err)
+);
+
+TRACE_EVENT(bucket_invalidate,
+ TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
+ TP_ARGS(c, dev, bucket, sectors),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u32, dev_idx )
+ __field(u32, sectors )
+ __field(u64, bucket )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->dev_idx = dev;
+ __entry->sectors = sectors;
+ __entry->bucket = bucket;
+ ),
+
+ TP_printk("%d:%d invalidated %u:%llu cached sectors %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dev_idx, __entry->bucket,
+ __entry->sectors)
+);
+
+/* Moving IO */
+
+TRACE_EVENT(bucket_evacuate,
+ TP_PROTO(struct bch_fs *c, struct bpos *bucket),
+ TP_ARGS(c, bucket),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u32, dev_idx )
+ __field(u64, bucket )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->dev_idx = bucket->inode;
+ __entry->bucket = bucket->offset;
+ ),
+
+ TP_printk("%d:%d %u:%llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dev_idx, __entry->bucket)
+);
+
+DEFINE_EVENT(bkey, move_extent,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
+);
+
+DEFINE_EVENT(bkey, move_extent_read,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
+);
+
+DEFINE_EVENT(bkey, move_extent_write,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
+);
+
+DEFINE_EVENT(bkey, move_extent_finish,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
+);
+
+TRACE_EVENT(move_extent_fail,
+ TP_PROTO(struct bch_fs *c, const char *msg),
+ TP_ARGS(c, msg),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __string(msg, msg )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __assign_str(msg, msg);
+ ),
+
+ TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
+);
+
+DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
+ TP_PROTO(struct bch_fs *c, const char *k),
+ TP_ARGS(c, k)
+);
+
+TRACE_EVENT(move_data,
+ TP_PROTO(struct bch_fs *c, u64 sectors_moved,
+ u64 keys_moved),
+ TP_ARGS(c, sectors_moved, keys_moved),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, sectors_moved )
+ __field(u64, keys_moved )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->sectors_moved = sectors_moved;
+ __entry->keys_moved = keys_moved;
+ ),
+
+ TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->sectors_moved, __entry->keys_moved)
+);
+
+TRACE_EVENT(evacuate_bucket,
+ TP_PROTO(struct bch_fs *c, struct bpos *bucket,
+ unsigned sectors, unsigned bucket_size,
+ u64 fragmentation, int ret),
+ TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, member )
+ __field(u64, bucket )
+ __field(u32, sectors )
+ __field(u32, bucket_size )
+ __field(u64, fragmentation )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->member = bucket->inode;
+ __entry->bucket = bucket->offset;
+ __entry->sectors = sectors;
+ __entry->bucket_size = bucket_size;
+ __entry->fragmentation = fragmentation;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->member, __entry->bucket,
+ __entry->sectors, __entry->bucket_size,
+ __entry->fragmentation, __entry->ret)
+);
+
+TRACE_EVENT(copygc,
+ TP_PROTO(struct bch_fs *c,
+ u64 sectors_moved, u64 sectors_not_moved,
+ u64 buckets_moved, u64 buckets_not_moved),
+ TP_ARGS(c,
+ sectors_moved, sectors_not_moved,
+ buckets_moved, buckets_not_moved),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, sectors_moved )
+ __field(u64, sectors_not_moved )
+ __field(u64, buckets_moved )
+ __field(u64, buckets_not_moved )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->sectors_moved = sectors_moved;
+ __entry->sectors_not_moved = sectors_not_moved;
+ __entry->buckets_moved = buckets_moved;
+		__entry->buckets_not_moved	= buckets_not_moved;
+ ),
+
+ TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->sectors_moved, __entry->sectors_not_moved,
+ __entry->buckets_moved, __entry->buckets_not_moved)
+);
+
+TRACE_EVENT(copygc_wait,
+ TP_PROTO(struct bch_fs *c,
+ u64 wait_amount, u64 until),
+ TP_ARGS(c, wait_amount, until),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(u64, wait_amount )
+ __field(u64, until )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = c->dev;
+ __entry->wait_amount = wait_amount;
+ __entry->until = until;
+ ),
+
+	TP_printk("%d,%d waiting for %llu sectors until %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->wait_amount, __entry->until)
+);
+
+/* btree transactions: */
+
+DECLARE_EVENT_CLASS(transaction_event,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ ),
+
+ TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
+);
+
+DEFINE_EVENT(transaction_event, transaction_commit,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_injected,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+TRACE_EVENT(trans_restart_split_race,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree *b),
+ TP_ARGS(trans, caller_ip, b),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, level )
+ __field(u16, written )
+ __field(u16, blocks )
+ __field(u16, u64s_remaining )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->level = b->c.level;
+ __entry->written = b->written;
+ __entry->blocks = btree_blocks(trans->c);
+ __entry->u64s_remaining = bch_btree_keys_u64s_remaining(trans->c, b);
+ ),
+
+ TP_printk("%s %pS l=%u written %u/%u u64s remaining %u",
+ __entry->trans_fn, (void *) __entry->caller_ip,
+ __entry->level,
+ __entry->written, __entry->blocks,
+ __entry->u64s_remaining)
+);
+
+DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+TRACE_EVENT(trans_restart_journal_preres_get,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ unsigned flags),
+ TP_ARGS(trans, caller_ip, flags),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(unsigned, flags )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("%s %pS %x", __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ __entry->flags)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DEFINE_EVENT(transaction_event, trans_traverse_all,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DECLARE_EVENT_CLASS(transaction_restart_iter,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos)
+ ),
+
+ TP_printk("%s %pS btree %s pos %llu:%llu:%u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_ids[__entry->btree_id],
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+TRACE_EVENT(trans_restart_upgrade,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path,
+ unsigned old_locks_want,
+ unsigned new_locks_want),
+ TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(u8, btree_id )
+ __field(u8, old_locks_want )
+ __field(u8, new_locks_want )
+ TRACE_BPOS_entries(pos)
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->btree_id = path->btree_id;
+ __entry->old_locks_want = old_locks_want;
+ __entry->new_locks_want = new_locks_want;
+ TRACE_BPOS_assign(pos, path->pos)
+ ),
+
+ TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_ids[__entry->btree_id],
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->old_locks_want,
+ __entry->new_locks_want)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path),
+ TP_ARGS(trans, caller_ip, path)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_would_deadlock,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+TRACE_EVENT(trans_restart_would_deadlock_write,
+ TP_PROTO(struct btree_trans *trans),
+ TP_ARGS(trans),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ ),
+
+ TP_printk("%s", __entry->trans_fn)
+);
+
+TRACE_EVENT(trans_restart_mem_realloced,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ unsigned long bytes),
+ TP_ARGS(trans, caller_ip, bytes),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(unsigned long, bytes )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->bytes = bytes;
+ ),
+
+ TP_printk("%s %pS bytes %lu",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ __entry->bytes)
+);
+
+TRACE_EVENT(trans_restart_key_cache_key_realloced,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip,
+ struct btree_path *path,
+ unsigned old_u64s,
+ unsigned new_u64s),
+ TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+ __field(enum btree_id, btree_id )
+ TRACE_BPOS_entries(pos)
+ __field(u32, old_u64s )
+ __field(u32, new_u64s )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+
+ __entry->btree_id = path->btree_id;
+ TRACE_BPOS_assign(pos, path->pos);
+ __entry->old_u64s = old_u64s;
+ __entry->new_u64s = new_u64s;
+ ),
+
+ TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
+ __entry->trans_fn,
+ (void *) __entry->caller_ip,
+ bch2_btree_ids[__entry->btree_id],
+ __entry->pos_inode,
+ __entry->pos_offset,
+ __entry->pos_snapshot,
+ __entry->old_u64s,
+ __entry->new_u64s)
+);
+
+DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush,
+ TP_PROTO(struct btree_trans *trans,
+ unsigned long caller_ip),
+ TP_ARGS(trans, caller_ip)
+);
+
+TRACE_EVENT(write_buffer_flush,
+ TP_PROTO(struct btree_trans *trans, size_t nr, size_t skipped, size_t fast, size_t size),
+ TP_ARGS(trans, nr, skipped, fast, size),
+
+ TP_STRUCT__entry(
+ __field(size_t, nr )
+ __field(size_t, skipped )
+ __field(size_t, fast )
+ __field(size_t, size )
+ ),
+
+ TP_fast_assign(
+ __entry->nr = nr;
+ __entry->skipped = skipped;
+ __entry->fast = fast;
+ __entry->size = size;
+ ),
+
+ TP_printk("%zu/%zu skipped %zu fast %zu",
+ __entry->nr, __entry->size, __entry->skipped, __entry->fast)
+);
+
+TRACE_EVENT(write_buffer_flush_slowpath,
+ TP_PROTO(struct btree_trans *trans, size_t nr, size_t size),
+ TP_ARGS(trans, nr, size),
+
+ TP_STRUCT__entry(
+ __field(size_t, nr )
+ __field(size_t, size )
+ ),
+
+ TP_fast_assign(
+ __entry->nr = nr;
+ __entry->size = size;
+ ),
+
+ TP_printk("%zu/%zu", __entry->nr, __entry->size)
+);
+
+#endif /* _TRACE_BCACHEFS_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../fs/bcachefs
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/fs/bcachefs/two_state_shared_lock.c b/fs/bcachefs/two_state_shared_lock.c
new file mode 100644
index 000000000000..9764c2e6a910
--- /dev/null
+++ b/fs/bcachefs/two_state_shared_lock.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "two_state_shared_lock.h"
+
+void __bch2_two_state_lock(two_state_lock_t *lock, int s)
+{
+ __wait_event(lock->wait, bch2_two_state_trylock(lock, s));
+}
diff --git a/fs/bcachefs/two_state_shared_lock.h b/fs/bcachefs/two_state_shared_lock.h
new file mode 100644
index 000000000000..905801772002
--- /dev/null
+++ b/fs/bcachefs/two_state_shared_lock.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_TWO_STATE_LOCK_H
+#define _BCACHEFS_TWO_STATE_LOCK_H
+
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include "util.h"
+
+/*
+ * Two-state lock - can be taken for add or block - both states are shared,
+ * like the read side of an rwsem, but each state conflicts with the other:
+ */
+typedef struct {
+ atomic_long_t v;
+ wait_queue_head_t wait;
+} two_state_lock_t;
+
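+/*
+ * Usage sketch (with a hypothetical lock instance); holders of the same
+ * state share the lock, while the two states exclude each other:
+ *
+ *	two_state_lock_init(&lock);
+ *	bch2_two_state_lock(&lock, 0);	// shared with other state-0 holders
+ *	...
+ *	bch2_two_state_unlock(&lock, 0);
+ */
+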
+static inline void two_state_lock_init(two_state_lock_t *lock)
+{
+ atomic_long_set(&lock->v, 0);
+ init_waitqueue_head(&lock->wait);
+}
+
+static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)
+{
+ long i = s ? 1 : -1;
+
+ EBUG_ON(atomic_long_read(&lock->v) == 0);
+
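+	/* Dropping the last reference wakes waiters for the opposite state: */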
+ if (atomic_long_sub_return_release(i, &lock->v) == 0)
+ wake_up_all(&lock->wait);
+}
+
+static inline bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
+{
+ long i = s ? 1 : -1;
+ long v = atomic_long_read(&lock->v), old;
+
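+	/* Fail if the lock is held in the opposite state; otherwise cmpxchg our count in: */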
+ do {
+ old = v;
+
+ if (i > 0 ? v < 0 : v > 0)
+ return false;
+ } while ((v = atomic_long_cmpxchg_acquire(&lock->v,
+ old, old + i)) != old);
+ return true;
+}
+
+void __bch2_two_state_lock(two_state_lock_t *, int);
+
+static inline void bch2_two_state_lock(two_state_lock_t *lock, int s)
+{
+ if (!bch2_two_state_trylock(lock, s))
+ __bch2_two_state_lock(lock, s);
+}
+
+#endif /* _BCACHEFS_TWO_STATE_LOCK_H */
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
new file mode 100644
index 000000000000..08bac0ba8d0b
--- /dev/null
+++ b/fs/bcachefs/util.c
@@ -0,0 +1,1141 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * random utility code, for bcache but in theory not specific to bcache
+ *
+ * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
+ * Copyright 2012 Google, Inc.
+ */
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched/clock.h>
+
+#include "eytzinger.h"
+#include "mean_and_variance.h"
+#include "util.h"
+
+static const char si_units[] = "?kMGTPEZY";
+
+/* string_get_size units: */
+static const char *const units_2[] = {
+ "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
+};
+static const char *const units_10[] = {
+ "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
+};
+
+static int parse_u64(const char *cp, u64 *res)
+{
+ const char *start = cp;
+ u64 v = 0;
+
+ if (!isdigit(*cp))
+ return -EINVAL;
+
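+	/* Accumulate decimal digits, checking for overflow before each multiply and add: */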
+ do {
+ if (v > U64_MAX / 10)
+ return -ERANGE;
+ v *= 10;
+ if (v > U64_MAX - (*cp - '0'))
+ return -ERANGE;
+ v += *cp - '0';
+ cp++;
+ } while (isdigit(*cp));
+
+ *res = v;
+ return cp - start;
+}
+
+static int bch2_pow(u64 n, u64 p, u64 *res)
+{
+ *res = 1;
+
+ while (p--) {
+ if (*res > div_u64(U64_MAX, n))
+ return -ERANGE;
+ *res *= n;
+ }
+ return 0;
+}
+
+static int parse_unit_suffix(const char *cp, u64 *res)
+{
+ const char *start = cp;
+ u64 base = 1024;
+ unsigned u;
+ int ret;
+
+ if (*cp == ' ')
+ cp++;
+
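+	/*
+	 * Try single-letter SI suffixes (base 1024) first, then "KiB"-style
+	 * binary units, then "kB"-style decimal units (base 1000):
+	 */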
+ for (u = 1; u < strlen(si_units); u++)
+ if (*cp == si_units[u]) {
+ cp++;
+ goto got_unit;
+ }
+
+ for (u = 0; u < ARRAY_SIZE(units_2); u++)
+ if (!strncmp(cp, units_2[u], strlen(units_2[u]))) {
+ cp += strlen(units_2[u]);
+ goto got_unit;
+ }
+
+ for (u = 0; u < ARRAY_SIZE(units_10); u++)
+ if (!strncmp(cp, units_10[u], strlen(units_10[u]))) {
+ cp += strlen(units_10[u]);
+ base = 1000;
+ goto got_unit;
+ }
+
+ *res = 1;
+ return 0;
+got_unit:
+ ret = bch2_pow(base, u, res);
+ if (ret)
+ return ret;
+
+ return cp - start;
+}
+
+#define parse_or_ret(cp, _f) \
+do { \
+ int _ret = _f; \
+ if (_ret < 0) \
+ return _ret; \
+ cp += _ret; \
+} while (0)
+
+static int __bch2_strtou64_h(const char *cp, u64 *res)
+{
+ const char *start = cp;
+ u64 v = 0, b, f_n = 0, f_d = 1;
+ int ret;
+
+ parse_or_ret(cp, parse_u64(cp, &v));
+
+ if (*cp == '.') {
+ cp++;
+ ret = parse_u64(cp, &f_n);
+ if (ret < 0)
+ return ret;
+ cp += ret;
+
+ ret = bch2_pow(10, ret, &f_d);
+ if (ret)
+ return ret;
+ }
+
+ parse_or_ret(cp, parse_unit_suffix(cp, &b));
+
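+	/*
+	 * Scale the integer part by the unit, then fold in the scaled
+	 * fractional part, checking for overflow at each step:
+	 */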
+ if (v > div_u64(U64_MAX, b))
+ return -ERANGE;
+ v *= b;
+
+ if (f_n > div_u64(U64_MAX, b))
+ return -ERANGE;
+
+ f_n = div_u64(f_n * b, f_d);
+ if (v + f_n < v)
+ return -ERANGE;
+ v += f_n;
+
+ *res = v;
+ return cp - start;
+}
+
+static int __bch2_strtoh(const char *cp, u64 *res,
+ u64 t_max, bool t_signed)
+{
+ bool positive = *cp != '-';
+ u64 v = 0;
+
+ if (*cp == '+' || *cp == '-')
+ cp++;
+
+ parse_or_ret(cp, __bch2_strtou64_h(cp, &v));
+
+ if (*cp == '\n')
+ cp++;
+ if (*cp)
+ return -EINVAL;
+
+ if (positive) {
+ if (v > t_max)
+ return -ERANGE;
+ } else {
+ if (v && !t_signed)
+ return -ERANGE;
+
+ if (v > t_max + 1)
+ return -ERANGE;
+ v = -v;
+ }
+
+ *res = v;
+ return 0;
+}
+
+#define STRTO_H(name, type) \
+int bch2_ ## name ## _h(const char *cp, type *res) \
+{ \
+ u64 v = 0; \
+ int ret = __bch2_strtoh(cp, &v, ANYSINT_MAX(type), \
+ ANYSINT_MAX(type) != ((type) ~0ULL)); \
+ *res = v; \
+ return ret; \
+}
+
+STRTO_H(strtoint, int)
+STRTO_H(strtouint, unsigned int)
+STRTO_H(strtoll, long long)
+STRTO_H(strtoull, unsigned long long)
+STRTO_H(strtou64, u64)
+
+u64 bch2_read_flag_list(char *opt, const char * const list[])
+{
+ u64 ret = 0;
+ char *p, *s, *d = kstrdup(opt, GFP_KERNEL);
+
+ if (!d)
+ return -ENOMEM;
+
+ s = strim(d);
+
+ while ((p = strsep(&s, ","))) {
+ int flag = match_string(list, -1, p);
+
+ if (flag < 0) {
+ ret = -1;
+ break;
+ }
+
+ ret |= 1 << flag;
+ }
+
+ kfree(d);
+
+ return ret;
+}
+
+bool bch2_is_zero(const void *_p, size_t n)
+{
+ const char *p = _p;
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ if (p[i])
+ return false;
+ return true;
+}
+
+void bch2_prt_u64_binary(struct printbuf *out, u64 v, unsigned nr_bits)
+{
+ while (nr_bits)
+ prt_char(out, '0' + ((v >> --nr_bits) & 1));
+}
+
+void bch2_print_string_as_lines(const char *prefix, const char *lines)
+{
+ const char *p;
+
+ if (!lines) {
+ printk("%s (null)\n", prefix);
+ return;
+ }
+
+ console_lock();
+ while (1) {
+ p = strchrnul(lines, '\n');
+ printk("%s%.*s\n", prefix, (int) (p - lines), lines);
+ if (!*p)
+ break;
+ lines = p + 1;
+ }
+ console_unlock();
+}
+
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task)
+{
+#ifdef CONFIG_STACKTRACE
+ unsigned nr_entries = 0;
+ int ret = 0;
+
+ stack->nr = 0;
+ ret = darray_make_room(stack, 32);
+ if (ret)
+ return ret;
+
+ if (!down_read_trylock(&task->signal->exec_update_lock))
+ return -1;
+
+ do {
+ nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, 0);
+ } while (nr_entries == stack->size &&
+ !(ret = darray_make_room(stack, stack->size * 2)));
+
+ stack->nr = nr_entries;
+ up_read(&task->signal->exec_update_lock);
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
+{
+ unsigned long *i;
+
+ darray_for_each(*stack, i) {
+ prt_printf(out, "[<0>] %pB", (void *) *i);
+ prt_newline(out);
+ }
+}
+
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task)
+{
+ bch_stacktrace stack = { 0 };
+ int ret = bch2_save_backtrace(&stack, task);
+
+ bch2_prt_backtrace(out, &stack);
+ darray_exit(&stack);
+ return ret;
+}
+
+/* time stats: */
+
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+static void bch2_quantiles_update(struct bch2_quantiles *q, u64 v)
+{
+ unsigned i = 0;
+
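+	/*
+	 * Each entry keeps a running estimate m and an adaptive step: nudge m
+	 * toward v, halve the step on a near miss, and descend the
+	 * eytzinger-layout tree:
+	 */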
+ while (i < ARRAY_SIZE(q->entries)) {
+ struct bch2_quantile_entry *e = q->entries + i;
+
+ if (unlikely(!e->step)) {
+ e->m = v;
+ e->step = max_t(unsigned, v / 2, 1024);
+ } else if (e->m > v) {
+ e->m = e->m >= e->step
+ ? e->m - e->step
+ : 0;
+ } else if (e->m < v) {
+ e->m = e->m + e->step > e->m
+ ? e->m + e->step
+ : U32_MAX;
+ }
+
+ if ((e->m > v ? e->m - v : v - e->m) < e->step)
+ e->step = max_t(unsigned, e->step / 2, 1);
+
+ if (v >= e->m)
+ break;
+
+ i = eytzinger0_child(i, v > e->m);
+ }
+}
+
+static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
+ u64 start, u64 end)
+{
+ u64 duration, freq;
+
+ if (time_after64(end, start)) {
+ duration = end - start;
+ mean_and_variance_update(&stats->duration_stats, duration);
+ mean_and_variance_weighted_update(&stats->duration_stats_weighted, duration);
+ stats->max_duration = max(stats->max_duration, duration);
+ stats->min_duration = min(stats->min_duration, duration);
+ bch2_quantiles_update(&stats->quantiles, duration);
+ }
+
+ if (time_after64(end, stats->last_event)) {
+ freq = end - stats->last_event;
+ mean_and_variance_update(&stats->freq_stats, freq);
+ mean_and_variance_weighted_update(&stats->freq_stats_weighted, freq);
+ stats->max_freq = max(stats->max_freq, freq);
+ stats->min_freq = min(stats->min_freq, freq);
+ stats->last_event = end;
+ }
+}
+
+static noinline void bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
+ struct bch2_time_stat_buffer *b)
+{
+ struct bch2_time_stat_buffer_entry *i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stats->lock, flags);
+ for (i = b->entries;
+ i < b->entries + ARRAY_SIZE(b->entries);
+ i++)
+ bch2_time_stats_update_one(stats, i->start, i->end);
+ spin_unlock_irqrestore(&stats->lock, flags);
+
+ b->nr = 0;
+}
+
+void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
+{
+ unsigned long flags;
+
+ WARN_RATELIMIT(!stats->min_duration || !stats->min_freq,
+ "time_stats: min_duration = %llu, min_freq = %llu",
+ stats->min_duration, stats->min_freq);
+
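+	/*
+	 * Infrequent events are recorded directly under the lock; once events
+	 * become very frequent, samples are buffered per-CPU and flushed in
+	 * batches:
+	 */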
+ if (!stats->buffer) {
+ spin_lock_irqsave(&stats->lock, flags);
+ bch2_time_stats_update_one(stats, start, end);
+
+ if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted) < 32 &&
+ stats->duration_stats.n > 1024)
+ stats->buffer =
+ alloc_percpu_gfp(struct bch2_time_stat_buffer,
+ GFP_ATOMIC);
+ spin_unlock_irqrestore(&stats->lock, flags);
+ } else {
+ struct bch2_time_stat_buffer *b;
+
+ preempt_disable();
+ b = this_cpu_ptr(stats->buffer);
+
+ BUG_ON(b->nr >= ARRAY_SIZE(b->entries));
+ b->entries[b->nr++] = (struct bch2_time_stat_buffer_entry) {
+ .start = start,
+ .end = end
+ };
+
+ if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
+ bch2_time_stats_clear_buffer(stats, b);
+ preempt_enable();
+ }
+}
+#endif
+
+static const struct time_unit {
+ const char *name;
+ u64 nsecs;
+} time_units[] = {
+ { "ns", 1 },
+ { "us", NSEC_PER_USEC },
+ { "ms", NSEC_PER_MSEC },
+ { "s", NSEC_PER_SEC },
+ { "m", (u64) NSEC_PER_SEC * 60},
+ { "h", (u64) NSEC_PER_SEC * 3600},
+ { "eon", U64_MAX },
+};
+
+static const struct time_unit *pick_time_units(u64 ns)
+{
+ const struct time_unit *u;
+
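+	/*
+	 * Advance to the next-larger unit only while the value would still be
+	 * at least 2 in it:
+	 */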
+ for (u = time_units;
+ u + 1 < time_units + ARRAY_SIZE(time_units) &&
+ ns >= u[1].nsecs << 1;
+ u++)
+ ;
+
+ return u;
+}
+
+void bch2_pr_time_units(struct printbuf *out, u64 ns)
+{
+ const struct time_unit *u = pick_time_units(ns);
+
+ prt_printf(out, "%llu %s", div_u64(ns, u->nsecs), u->name);
+}
+
+static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
+{
+ const struct time_unit *u = pick_time_units(ns);
+
+ prt_printf(out, "%llu ", div64_u64(ns, u->nsecs));
+ prt_tab_rjust(out);
+ prt_printf(out, "%s", u->name);
+}
+
+#define TABSTOP_SIZE 12
+
+static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
+{
+ prt_str(out, name);
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, ns);
+ prt_newline(out);
+}
+
+void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats)
+{
+ const struct time_unit *u;
+ s64 f_mean = 0, d_mean = 0;
+ u64 q, last_q = 0, f_stddev = 0, d_stddev = 0;
+	int i;
+
+	/* avoid divide by zero */
+ if (stats->freq_stats.n) {
+ f_mean = mean_and_variance_get_mean(stats->freq_stats);
+ f_stddev = mean_and_variance_get_stddev(stats->freq_stats);
+ d_mean = mean_and_variance_get_mean(stats->duration_stats);
+ d_stddev = mean_and_variance_get_stddev(stats->duration_stats);
+ }
+
+ printbuf_tabstop_push(out, out->indent + TABSTOP_SIZE);
+ prt_printf(out, "count:");
+ prt_tab(out);
+ prt_printf(out, "%llu ",
+ stats->duration_stats.n);
+ printbuf_tabstop_pop(out);
+ prt_newline(out);
+
+ printbuf_tabstops_reset(out);
+
+ printbuf_tabstop_push(out, out->indent + 20);
+ printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
+ printbuf_tabstop_push(out, 0);
+ printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
+
+ prt_tab(out);
+ prt_printf(out, "since mount");
+ prt_tab_rjust(out);
+ prt_tab(out);
+ prt_printf(out, "recent");
+ prt_tab_rjust(out);
+ prt_newline(out);
+
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, out->indent + 20);
+ printbuf_tabstop_push(out, TABSTOP_SIZE);
+ printbuf_tabstop_push(out, 2);
+ printbuf_tabstop_push(out, TABSTOP_SIZE);
+
+ prt_printf(out, "duration of events");
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ pr_name_and_units(out, "min:", stats->min_duration);
+ pr_name_and_units(out, "max:", stats->max_duration);
+
+ prt_printf(out, "mean:");
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, d_mean);
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted));
+ prt_newline(out);
+
+ prt_printf(out, "stddev:");
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, d_stddev);
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted));
+
+ printbuf_indent_sub(out, 2);
+ prt_newline(out);
+
+ prt_printf(out, "time between events");
+ prt_newline(out);
+ printbuf_indent_add(out, 2);
+
+ pr_name_and_units(out, "min:", stats->min_freq);
+ pr_name_and_units(out, "max:", stats->max_freq);
+
+ prt_printf(out, "mean:");
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, f_mean);
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted));
+ prt_newline(out);
+
+ prt_printf(out, "stddev:");
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, f_stddev);
+ prt_tab(out);
+ bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted));
+
+ printbuf_indent_sub(out, 2);
+ prt_newline(out);
+
+ printbuf_tabstops_reset(out);
+
+ i = eytzinger0_first(NR_QUANTILES);
+ u = pick_time_units(stats->quantiles.entries[i].m);
+
+ prt_printf(out, "quantiles (%s):\t", u->name);
+ eytzinger0_for_each(i, NR_QUANTILES) {
+ bool is_last = eytzinger0_next(i, NR_QUANTILES) == -1;
+
+ q = max(stats->quantiles.entries[i].m, last_q);
+ prt_printf(out, "%llu ",
+ div_u64(q, u->nsecs));
+ if (is_last)
+ prt_newline(out);
+ last_q = q;
+ }
+}
+
+void bch2_time_stats_exit(struct bch2_time_stats *stats)
+{
+ free_percpu(stats->buffer);
+}
+
+void bch2_time_stats_init(struct bch2_time_stats *stats)
+{
+ memset(stats, 0, sizeof(*stats));
+ stats->duration_stats_weighted.weight = 8;
+ stats->freq_stats_weighted.weight = 8;
+ stats->min_duration = U64_MAX;
+ stats->min_freq = U64_MAX;
+ spin_lock_init(&stats->lock);
+}
+
+/* ratelimit: */
+
+/**
+ * bch2_ratelimit_delay() - return how long to delay until the next time to do
+ * some work
+ * @d: the struct bch_ratelimit to update
+ * Returns: the amount of time to delay by, in jiffies
+ */
+u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
+{
+ u64 now = local_clock();
+
+ return time_after64(d->next, now)
+ ? nsecs_to_jiffies(d->next - now)
+ : 0;
+}
+
+/**
+ * bch2_ratelimit_increment() - increment @d by the amount of work done
+ * @d: the struct bch_ratelimit to update
+ * @done: the amount of work done, in arbitrary units
+ */
+void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
+{
+ u64 now = local_clock();
+
+ d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+ if (time_before64(now + NSEC_PER_SEC, d->next))
+ d->next = now + NSEC_PER_SEC;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
+}
+
+/* pd controller: */
+
+/*
+ * Updates the PD controller. Attempts to scale input values to units per
+ * second.
+ * @target: desired value
+ * @actual: current value
+ *
+ * @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
+ * it makes actual go down.
+ */
+void bch2_pd_controller_update(struct bch_pd_controller *pd,
+ s64 target, s64 actual, int sign)
+{
+ s64 proportional, derivative, change;
+
+ unsigned long seconds_since_update = (jiffies - pd->last_update) / HZ;
+
+ if (seconds_since_update == 0)
+ return;
+
+ pd->last_update = jiffies;
+
+ proportional = actual - target;
+ proportional *= seconds_since_update;
+ proportional = div_s64(proportional, pd->p_term_inverse);
+
+ derivative = actual - pd->last_actual;
+ derivative = div_s64(derivative, seconds_since_update);
+ derivative = ewma_add(pd->smoothed_derivative, derivative,
+ (pd->d_term / seconds_since_update) ?: 1);
+ derivative = derivative * pd->d_term;
+ derivative = div_s64(derivative, pd->p_term_inverse);
+
+ change = proportional + derivative;
+
+ /* Don't increase rate if not keeping up */
+ if (change > 0 &&
+ pd->backpressure &&
+ time_after64(local_clock(),
+ pd->rate.next + NSEC_PER_MSEC))
+ change = 0;
+
+ change *= (sign * -1);
+
+ pd->rate.rate = clamp_t(s64, (s64) pd->rate.rate + change,
+ 1, UINT_MAX);
+
+ pd->last_actual = actual;
+ pd->last_derivative = derivative;
+ pd->last_proportional = proportional;
+ pd->last_change = change;
+ pd->last_target = target;
+}
+
+void bch2_pd_controller_init(struct bch_pd_controller *pd)
+{
+ pd->rate.rate = 1024;
+ pd->last_update = jiffies;
+ pd->p_term_inverse = 6000;
+ pd->d_term = 30;
+ pd->d_smooth = pd->d_term;
+ pd->backpressure = 1;
+}
+
+void bch2_pd_controller_debug_to_text(struct printbuf *out, struct bch_pd_controller *pd)
+{
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 20);
+
+ prt_printf(out, "rate:");
+ prt_tab(out);
+ prt_human_readable_s64(out, pd->rate.rate);
+ prt_newline(out);
+
+ prt_printf(out, "target:");
+ prt_tab(out);
+ prt_human_readable_u64(out, pd->last_target);
+ prt_newline(out);
+
+ prt_printf(out, "actual:");
+ prt_tab(out);
+ prt_human_readable_u64(out, pd->last_actual);
+ prt_newline(out);
+
+ prt_printf(out, "proportional:");
+ prt_tab(out);
+ prt_human_readable_s64(out, pd->last_proportional);
+ prt_newline(out);
+
+ prt_printf(out, "derivative:");
+ prt_tab(out);
+ prt_human_readable_s64(out, pd->last_derivative);
+ prt_newline(out);
+
+ prt_printf(out, "change:");
+ prt_tab(out);
+ prt_human_readable_s64(out, pd->last_change);
+ prt_newline(out);
+
+ prt_printf(out, "next io:");
+ prt_tab(out);
+ prt_printf(out, "%llims", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
+ prt_newline(out);
+}
+
+/* misc: */
+
+void bch2_bio_map(struct bio *bio, void *base, size_t size)
+{
+ while (size) {
+ struct page *page = is_vmalloc_addr(base)
+ ? vmalloc_to_page(base)
+ : virt_to_page(base);
+ unsigned offset = offset_in_page(base);
+ unsigned len = min_t(size_t, PAGE_SIZE - offset, size);
+
+ BUG_ON(!bio_add_page(bio, page, len, offset));
+ size -= len;
+ base += len;
+ }
+}
+
+int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
+{
+ while (size) {
+ struct page *page = alloc_pages(gfp_mask, 0);
+ unsigned len = min_t(size_t, PAGE_SIZE, size);
+
+ if (!page)
+ return -ENOMEM;
+
+ if (unlikely(!bio_add_page(bio, page, len, 0))) {
+ __free_page(page);
+ break;
+ }
+
+ size -= len;
+ }
+
+ return 0;
+}
+
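+/*
+ * Returns a uniformly distributed random number in [0, max). Masking to the
+ * next power of two and rejecting out-of-range values avoids modulo bias:
+ * e.g. with max == 5 we draw from [0, 8) and retry on 5-7, leaving 0-4
+ * equally likely.
+ */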
+size_t bch2_rand_range(size_t max)
+{
+ size_t rand;
+
+ if (!max)
+ return 0;
+
+ do {
+ rand = get_random_long();
+ rand &= roundup_pow_of_two(max) - 1;
+ } while (rand >= max);
+
+ return rand;
+}
+
+void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ __bio_for_each_segment(bv, dst, iter, dst_iter) {
+ void *dstp = kmap_local_page(bv.bv_page);
+
+ memcpy(dstp + bv.bv_offset, src, bv.bv_len);
+ kunmap_local(dstp);
+
+ src += bv.bv_len;
+ }
+}
+
+void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
+{
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ __bio_for_each_segment(bv, src, iter, src_iter) {
+ void *srcp = kmap_local_page(bv.bv_page);
+
+ memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
+ kunmap_local(srcp);
+
+ dst += bv.bv_len;
+ }
+}
+
+static int alignment_ok(const void *base, size_t align)
+{
+ return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ ((unsigned long)base & (align - 1)) == 0;
+}
+
+static void u32_swap(void *a, void *b, size_t size)
+{
+ u32 t = *(u32 *)a;
+ *(u32 *)a = *(u32 *)b;
+ *(u32 *)b = t;
+}
+
+static void u64_swap(void *a, void *b, size_t size)
+{
+ u64 t = *(u64 *)a;
+ *(u64 *)a = *(u64 *)b;
+ *(u64 *)b = t;
+}
+
+static void generic_swap(void *a, void *b, size_t size)
+{
+ char t;
+
+ do {
+ t = *(char *)a;
+ *(char *)a++ = *(char *)b;
+ *(char *)b++ = t;
+ } while (--size > 0);
+}
+
+static inline int do_cmp(void *base, size_t n, size_t size,
+ int (*cmp_func)(const void *, const void *, size_t),
+ size_t l, size_t r)
+{
+ return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
+ base + inorder_to_eytzinger0(r, n) * size,
+ size);
+}
+
+static inline void do_swap(void *base, size_t n, size_t size,
+ void (*swap_func)(void *, void *, size_t),
+ size_t l, size_t r)
+{
+ swap_func(base + inorder_to_eytzinger0(l, n) * size,
+ base + inorder_to_eytzinger0(r, n) * size,
+ size);
+}
+
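+/*
+ * Sort an array into eytzinger (BFS search tree) order: a standard heapsort,
+ * but with comparisons and swaps remapped through inorder_to_eytzinger0() so
+ * that the in-order traversal of the result is sorted.
+ */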
+void eytzinger0_sort(void *base, size_t n, size_t size,
+ int (*cmp_func)(const void *, const void *, size_t),
+ void (*swap_func)(void *, void *, size_t))
+{
+ int i, c, r;
+
+ if (!swap_func) {
+ if (size == 4 && alignment_ok(base, 4))
+ swap_func = u32_swap;
+ else if (size == 8 && alignment_ok(base, 8))
+ swap_func = u64_swap;
+ else
+ swap_func = generic_swap;
+ }
+
+ /* heapify */
+ for (i = n / 2 - 1; i >= 0; --i) {
+ for (r = i; r * 2 + 1 < n; r = c) {
+ c = r * 2 + 1;
+
+ if (c + 1 < n &&
+ do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
+ c++;
+
+ if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
+ break;
+
+ do_swap(base, n, size, swap_func, r, c);
+ }
+ }
+
+ /* sort */
+ for (i = n - 1; i > 0; --i) {
+ do_swap(base, n, size, swap_func, 0, i);
+
+ for (r = 0; r * 2 + 1 < i; r = c) {
+ c = r * 2 + 1;
+
+ if (c + 1 < i &&
+ do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
+ c++;
+
+ if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
+ break;
+
+ do_swap(base, n, size, swap_func, r, c);
+ }
+ }
+}
+
+void sort_cmp_size(void *base, size_t num, size_t size,
+ int (*cmp_func)(const void *, const void *, size_t),
+ void (*swap_func)(void *, void *, size_t size))
+{
+ /* pre-scale counters for performance */
+ int i = (num/2 - 1) * size, n = num * size, c, r;
+
+ if (!swap_func) {
+ if (size == 4 && alignment_ok(base, 4))
+ swap_func = u32_swap;
+ else if (size == 8 && alignment_ok(base, 8))
+ swap_func = u64_swap;
+ else
+ swap_func = generic_swap;
+ }
+
+ /* heapify */
+ for ( ; i >= 0; i -= size) {
+ for (r = i; r * 2 + size < n; r = c) {
+ c = r * 2 + size;
+ if (c < n - size &&
+ cmp_func(base + c, base + c + size, size) < 0)
+ c += size;
+ if (cmp_func(base + r, base + c, size) >= 0)
+ break;
+ swap_func(base + r, base + c, size);
+ }
+ }
+
+ /* sort */
+ for (i = n - size; i > 0; i -= size) {
+ swap_func(base, base + i, size);
+ for (r = 0; r * 2 + size < i; r = c) {
+ c = r * 2 + size;
+ if (c < i - size &&
+ cmp_func(base + c, base + c + size, size) < 0)
+ c += size;
+ if (cmp_func(base + r, base + c, size) >= 0)
+ break;
+ swap_func(base + r, base + c, size);
+ }
+ }
+}
+
+static void mempool_free_vp(void *element, void *pool_data)
+{
+ size_t size = (size_t) pool_data;
+
+ vpfree(element, size);
+}
+
+static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
+{
+ size_t size = (size_t) pool_data;
+
+ return vpmalloc(size, gfp_mask);
+}
+
+int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
+{
+ return size < PAGE_SIZE
+ ? mempool_init_kmalloc_pool(pool, min_nr, size)
+ : mempool_init(pool, min_nr, mempool_alloc_vp,
+ mempool_free_vp, (void *) size);
+}
+
+#if 0
+void eytzinger1_test(void)
+{
+ unsigned inorder, eytz, size;
+
+ pr_info("1 based eytzinger test:");
+
+ for (size = 2;
+ size < 65536;
+ size++) {
+ unsigned extra = eytzinger1_extra(size);
+
+ if (!(size % 4096))
+ pr_info("tree size %u", size);
+
+ BUG_ON(eytzinger1_prev(0, size) != eytzinger1_last(size));
+ BUG_ON(eytzinger1_next(0, size) != eytzinger1_first(size));
+
+ BUG_ON(eytzinger1_prev(eytzinger1_first(size), size) != 0);
+ BUG_ON(eytzinger1_next(eytzinger1_last(size), size) != 0);
+
+ inorder = 1;
+ eytzinger1_for_each(eytz, size) {
+ BUG_ON(__inorder_to_eytzinger1(inorder, size, extra) != eytz);
+ BUG_ON(__eytzinger1_to_inorder(eytz, size, extra) != inorder);
+ BUG_ON(eytz != eytzinger1_last(size) &&
+ eytzinger1_prev(eytzinger1_next(eytz, size), size) != eytz);
+
+ inorder++;
+ }
+ }
+}
+
+void eytzinger0_test(void)
+{
+ unsigned inorder, eytz, size;
+
+ pr_info("0 based eytzinger test:");
+
+ for (size = 1;
+ size < 65536;
+ size++) {
+ unsigned extra = eytzinger0_extra(size);
+
+ if (!(size % 4096))
+ pr_info("tree size %u", size);
+
+ BUG_ON(eytzinger0_prev(-1, size) != eytzinger0_last(size));
+ BUG_ON(eytzinger0_next(-1, size) != eytzinger0_first(size));
+
+ BUG_ON(eytzinger0_prev(eytzinger0_first(size), size) != -1);
+ BUG_ON(eytzinger0_next(eytzinger0_last(size), size) != -1);
+
+ inorder = 0;
+ eytzinger0_for_each(eytz, size) {
+ BUG_ON(__inorder_to_eytzinger0(inorder, size, extra) != eytz);
+ BUG_ON(__eytzinger0_to_inorder(eytz, size, extra) != inorder);
+ BUG_ON(eytz != eytzinger0_last(size) &&
+ eytzinger0_prev(eytzinger0_next(eytz, size), size) != eytz);
+
+ inorder++;
+ }
+ }
+}
+
+static inline int cmp_u16(const void *_l, const void *_r, size_t size)
+{
+ const u16 *l = _l, *r = _r;
+
+	return (*l > *r) - (*l < *r);
+}
+
+static void eytzinger0_find_test_val(u16 *test_array, unsigned nr, u16 search)
+{
+ int i, c1 = -1, c2 = -1;
+ ssize_t r;
+
+ r = eytzinger0_find_le(test_array, nr,
+ sizeof(test_array[0]),
+ cmp_u16, &search);
+ if (r >= 0)
+ c1 = test_array[r];
+
+ for (i = 0; i < nr; i++)
+ if (test_array[i] <= search && test_array[i] > c2)
+ c2 = test_array[i];
+
+ if (c1 != c2) {
+ eytzinger0_for_each(i, nr)
+ pr_info("[%3u] = %12u", i, test_array[i]);
+ pr_info("find_le(%2u) -> [%2zi] = %2i should be %2i",
+			search, r, c1, c2);
+ }
+}
+
+void eytzinger0_find_test(void)
+{
+ unsigned i, nr, allocated = 1 << 12;
+ u16 *test_array = kmalloc_array(allocated, sizeof(test_array[0]), GFP_KERNEL);
+
+ for (nr = 1; nr < allocated; nr++) {
+ pr_info("testing %u elems", nr);
+
+ get_random_bytes(test_array, nr * sizeof(test_array[0]));
+ eytzinger0_sort(test_array, nr, sizeof(test_array[0]), cmp_u16, NULL);
+
+ /* verify array is sorted correctly: */
+ eytzinger0_for_each(i, nr)
+ BUG_ON(i != eytzinger0_last(nr) &&
+ test_array[i] > test_array[eytzinger0_next(i, nr)]);
+
+ for (i = 0; i < U16_MAX; i += 1 << 12)
+ eytzinger0_find_test_val(test_array, nr, i);
+
+ for (i = 0; i < nr; i++) {
+ eytzinger0_find_test_val(test_array, nr, test_array[i] - 1);
+ eytzinger0_find_test_val(test_array, nr, test_array[i]);
+ eytzinger0_find_test_val(test_array, nr, test_array[i] + 1);
+ }
+ }
+
+ kfree(test_array);
+}
+#endif
+
+/*
+ * Accumulate percpu counters onto one cpu's copy - only valid when concurrent
+ * access to the percpu counters is excluded by other locking
+ */
+u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
+{
+ u64 *ret;
+ int cpu;
+
+ /* access to pcpu vars has to be blocked by other locking */
+ preempt_disable();
+ ret = this_cpu_ptr(p);
+ preempt_enable();
+
+ for_each_possible_cpu(cpu) {
+ u64 *i = per_cpu_ptr(p, cpu);
+
+ if (i != ret) {
+ acc_u64s(ret, i, nr);
+ memset(i, 0, nr * sizeof(u64));
+ }
+ }
+
+ return ret;
+}
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
new file mode 100644
index 000000000000..849a37ae497c
--- /dev/null
+++ b/fs/bcachefs/util.h
@@ -0,0 +1,852 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_UTIL_H
+#define _BCACHEFS_UTIL_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/closure.h>
+#include <linux/errno.h>
+#include <linux/freezer.h>
+#include <linux/kernel.h>
+#include <linux/sched/clock.h>
+#include <linux/llist.h>
+#include <linux/log2.h>
+#include <linux/percpu.h>
+#include <linux/preempt.h>
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include "mean_and_variance.h"
+
+#include "darray.h"
+
+struct closure;
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+#define EBUG_ON(cond) BUG_ON(cond)
+#else
+#define EBUG_ON(cond)
+#endif
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define CPU_BIG_ENDIAN 0
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define CPU_BIG_ENDIAN 1
+#endif
+
+/* type hackery */
+
+#define type_is_exact(_val, _type) \
+ __builtin_types_compatible_p(typeof(_val), _type)
+
+#define type_is(_val, _type) \
+ (__builtin_types_compatible_p(typeof(_val), _type) || \
+ __builtin_types_compatible_p(typeof(_val), const _type))
+
+/* Userspace doesn't align allocations as nicely as the kernel allocators: */
+static inline size_t buf_pages(void *p, size_t len)
+{
+ return DIV_ROUND_UP(len +
+ ((unsigned long) p & (PAGE_SIZE - 1)),
+ PAGE_SIZE);
+}
+
+static inline void vpfree(void *p, size_t size)
+{
+ if (is_vmalloc_addr(p))
+ vfree(p);
+ else
+ free_pages((unsigned long) p, get_order(size));
+}
+
+static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
+{
+ return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
+ get_order(size)) ?:
+ __vmalloc(size, gfp_mask);
+}
+
+static inline void kvpfree(void *p, size_t size)
+{
+ if (size < PAGE_SIZE)
+ kfree(p);
+ else
+ vpfree(p, size);
+}
+
+static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
+{
+ return size < PAGE_SIZE
+ ? kmalloc(size, gfp_mask)
+ : vpmalloc(size, gfp_mask);
+}
+
+int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
+
+#define HEAP(type) \
+struct { \
+ size_t size, used; \
+ type *data; \
+}
+
+#define DECLARE_HEAP(type, name) HEAP(type) name
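+/*
+ * Minimal binary heap; the ordering is determined by the cmp callback passed
+ * to each operation. Illustrative usage (names hypothetical):
+ *
+ *	static inline int u32_min_cmp(void *h, u32 l, u32 r)
+ *	{
+ *		return cmp_int(l, r);
+ *	}
+ *
+ *	DECLARE_HEAP(u32, heap);
+ *	init_heap(&heap, 128, GFP_KERNEL);
+ *	heap_add(&heap, 42, u32_min_cmp, NULL);
+ */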
+
+#define init_heap(heap, _size, gfp) \
+({ \
+ (heap)->used = 0; \
+ (heap)->size = (_size); \
+ (heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
+ (gfp)); \
+})
+
+#define free_heap(heap) \
+do { \
+ kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0])); \
+ (heap)->data = NULL; \
+} while (0)
+
+#define heap_set_backpointer(h, i, _fn) \
+do { \
+ void (*fn)(typeof(h), size_t) = _fn; \
+ if (fn) \
+ fn(h, i); \
+} while (0)
+
+#define heap_swap(h, i, j, set_backpointer) \
+do { \
+ swap((h)->data[i], (h)->data[j]); \
+ heap_set_backpointer(h, i, set_backpointer); \
+ heap_set_backpointer(h, j, set_backpointer); \
+} while (0)
+
+#define heap_peek(h) \
+({ \
+ EBUG_ON(!(h)->used); \
+ (h)->data[0]; \
+})
+
+#define heap_full(h) ((h)->used == (h)->size)
+
+#define heap_sift_down(h, i, cmp, set_backpointer) \
+do { \
+ size_t _c, _j = i; \
+ \
+ for (; _j * 2 + 1 < (h)->used; _j = _c) { \
+ _c = _j * 2 + 1; \
+ if (_c + 1 < (h)->used && \
+ cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0) \
+ _c++; \
+ \
+ if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0) \
+ break; \
+ heap_swap(h, _c, _j, set_backpointer); \
+ } \
+} while (0)
+
+#define heap_sift_up(h, i, cmp, set_backpointer) \
+do { \
+ while (i) { \
+ size_t p = (i - 1) / 2; \
+ if (cmp(h, (h)->data[i], (h)->data[p]) >= 0) \
+ break; \
+ heap_swap(h, i, p, set_backpointer); \
+ i = p; \
+ } \
+} while (0)
+
+#define __heap_add(h, d, cmp, set_backpointer) \
+({ \
+ size_t _i = (h)->used++; \
+ (h)->data[_i] = d; \
+ heap_set_backpointer(h, _i, set_backpointer); \
+ \
+ heap_sift_up(h, _i, cmp, set_backpointer); \
+ _i; \
+})
+
+#define heap_add(h, d, cmp, set_backpointer) \
+({ \
+ bool _r = !heap_full(h); \
+ if (_r) \
+ __heap_add(h, d, cmp, set_backpointer); \
+ _r; \
+})
+
+#define heap_add_or_replace(h, new, cmp, set_backpointer) \
+do { \
+ if (!heap_add(h, new, cmp, set_backpointer) && \
+ cmp(h, new, heap_peek(h)) >= 0) { \
+ (h)->data[0] = new; \
+ heap_set_backpointer(h, 0, set_backpointer); \
+ heap_sift_down(h, 0, cmp, set_backpointer); \
+ } \
+} while (0)
+
+#define heap_del(h, i, cmp, set_backpointer) \
+do { \
+ size_t _i = (i); \
+ \
+ BUG_ON(_i >= (h)->used); \
+ (h)->used--; \
+ if ((_i) < (h)->used) { \
+ heap_swap(h, _i, (h)->used, set_backpointer); \
+ heap_sift_up(h, _i, cmp, set_backpointer); \
+ heap_sift_down(h, _i, cmp, set_backpointer); \
+ } \
+} while (0)
+
+#define heap_pop(h, d, cmp, set_backpointer) \
+({ \
+ bool _r = (h)->used; \
+ if (_r) { \
+ (d) = (h)->data[0]; \
+ heap_del(h, 0, cmp, set_backpointer); \
+ } \
+ _r; \
+})
+
+#define heap_resort(heap, cmp, set_backpointer) \
+do { \
+ ssize_t _i; \
+ for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i) \
+ heap_sift_down(heap, _i, cmp, set_backpointer); \
+} while (0)
+
+#define ANYSINT_MAX(t) \
+ ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
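+/* e.g. ANYSINT_MAX(s8) == 127, ANYSINT_MAX(s64) == S64_MAX */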
+
+#include "printbuf.h"
+
+#define prt_vprintf(_out, ...) bch2_prt_vprintf(_out, __VA_ARGS__)
+#define prt_printf(_out, ...) bch2_prt_printf(_out, __VA_ARGS__)
+#define printbuf_str(_buf) bch2_printbuf_str(_buf)
+#define printbuf_exit(_buf) bch2_printbuf_exit(_buf)
+
+#define printbuf_tabstops_reset(_buf) bch2_printbuf_tabstops_reset(_buf)
+#define printbuf_tabstop_pop(_buf) bch2_printbuf_tabstop_pop(_buf)
+#define printbuf_tabstop_push(_buf, _n) bch2_printbuf_tabstop_push(_buf, _n)
+
+#define printbuf_indent_add(_out, _n) bch2_printbuf_indent_add(_out, _n)
+#define printbuf_indent_sub(_out, _n) bch2_printbuf_indent_sub(_out, _n)
+
+#define prt_newline(_out) bch2_prt_newline(_out)
+#define prt_tab(_out) bch2_prt_tab(_out)
+#define prt_tab_rjust(_out) bch2_prt_tab_rjust(_out)
+
+#define prt_bytes_indented(...) bch2_prt_bytes_indented(__VA_ARGS__)
+#define prt_u64(_out, _v) prt_printf(_out, "%llu", (u64) (_v))
+#define prt_human_readable_u64(...) bch2_prt_human_readable_u64(__VA_ARGS__)
+#define prt_human_readable_s64(...) bch2_prt_human_readable_s64(__VA_ARGS__)
+#define prt_units_u64(...) bch2_prt_units_u64(__VA_ARGS__)
+#define prt_units_s64(...) bch2_prt_units_s64(__VA_ARGS__)
+#define prt_string_option(...) bch2_prt_string_option(__VA_ARGS__)
+#define prt_bitflags(...) bch2_prt_bitflags(__VA_ARGS__)
+
+void bch2_pr_time_units(struct printbuf *, u64);
+
+#ifdef __KERNEL__
+static inline void pr_time(struct printbuf *out, u64 time)
+{
+ prt_printf(out, "%llu", time);
+}
+#else
+#include <time.h>
+static inline void pr_time(struct printbuf *out, u64 _time)
+{
+ char time_str[64];
+ time_t time = _time;
+ struct tm *tm = localtime(&time);
+ size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
+ if (!err)
+ prt_printf(out, "(formatting error)");
+ else
+ prt_printf(out, "%s", time_str);
+}
+#endif
+
+#ifdef __KERNEL__
+static inline void uuid_unparse_lower(u8 *uuid, char *out)
+{
+ sprintf(out, "%pUb", uuid);
+}
+#else
+#include <uuid/uuid.h>
+#endif
+
+static inline void pr_uuid(struct printbuf *out, u8 *uuid)
+{
+ char uuid_str[40];
+
+ uuid_unparse_lower(uuid, uuid_str);
+ prt_printf(out, "%s", uuid_str);
+}
+
+int bch2_strtoint_h(const char *, int *);
+int bch2_strtouint_h(const char *, unsigned int *);
+int bch2_strtoll_h(const char *, long long *);
+int bch2_strtoull_h(const char *, unsigned long long *);
+int bch2_strtou64_h(const char *, u64 *);
+
+static inline int bch2_strtol_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+ return bch2_strtoint_h(cp, (int *) res);
+#else
+ return bch2_strtoll_h(cp, (long long *) res);
+#endif
+}
+
+static inline int bch2_strtoul_h(const char *cp, long *res)
+{
+#if BITS_PER_LONG == 32
+ return bch2_strtouint_h(cp, (unsigned int *) res);
+#else
+ return bch2_strtoull_h(cp, (unsigned long long *) res);
+#endif
+}
+
+#define strtoi_h(cp, res) \
+ ( type_is(*res, int) ? bch2_strtoint_h(cp, (void *) res)\
+ : type_is(*res, long) ? bch2_strtol_h(cp, (void *) res)\
+ : type_is(*res, long long) ? bch2_strtoll_h(cp, (void *) res)\
+ : type_is(*res, unsigned) ? bch2_strtouint_h(cp, (void *) res)\
+ : type_is(*res, unsigned long) ? bch2_strtoul_h(cp, (void *) res)\
+ : type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
+ : -EINVAL)
+
+#define strtoul_safe(cp, var) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r) \
+ var = _v; \
+ _r; \
+})
+
+#define strtoul_safe_clamp(cp, var, min, max) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r) \
+ var = clamp_t(typeof(var), _v, min, max); \
+ _r; \
+})
+
+#define strtoul_safe_restrict(cp, var, min, max) \
+({ \
+ unsigned long _v; \
+ int _r = kstrtoul(cp, 10, &_v); \
+ if (!_r && _v >= min && _v <= max) \
+ var = _v; \
+ else \
+ _r = -EINVAL; \
+ _r; \
+})
+
+#define snprint(out, var) \
+ prt_printf(out, \
+ type_is(var, int) ? "%i\n" \
+ : type_is(var, unsigned) ? "%u\n" \
+ : type_is(var, long) ? "%li\n" \
+ : type_is(var, unsigned long) ? "%lu\n" \
+ : type_is(var, s64) ? "%lli\n" \
+ : type_is(var, u64) ? "%llu\n" \
+ : type_is(var, char *) ? "%s\n" \
+ : "%i\n", var)
+
+bool bch2_is_zero(const void *, size_t);
+
+u64 bch2_read_flag_list(char *, const char * const[]);
+
+void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
+
+void bch2_print_string_as_lines(const char *prefix, const char *lines);
+
+typedef DARRAY(unsigned long) bch_stacktrace;
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
+void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);
+
+#define NR_QUANTILES 15
+#define QUANTILE_IDX(i) inorder_to_eytzinger0(i, NR_QUANTILES)
+#define QUANTILE_FIRST eytzinger0_first(NR_QUANTILES)
+#define QUANTILE_LAST eytzinger0_last(NR_QUANTILES)
+
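+/*
+ * Online quantile estimation, kept as a small eytzinger-ordered search tree:
+ * each entry's estimate m is nudged toward incoming samples by a per-entry
+ * step size, which is halved as the estimate converges.
+ */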
+struct bch2_quantiles {
+ struct bch2_quantile_entry {
+ u64 m;
+ u64 step;
+ } entries[NR_QUANTILES];
+};
+
+struct bch2_time_stat_buffer {
+ unsigned nr;
+ struct bch2_time_stat_buffer_entry {
+ u64 start;
+ u64 end;
+ } entries[32];
+};
+
+struct bch2_time_stats {
+ spinlock_t lock;
+ /* all fields are in nanoseconds */
+ u64 max_duration;
+ u64 min_duration;
+ u64 max_freq;
+ u64 min_freq;
+ u64 last_event;
+ struct bch2_quantiles quantiles;
+
+ struct mean_and_variance duration_stats;
+ struct mean_and_variance_weighted duration_stats_weighted;
+ struct mean_and_variance freq_stats;
+ struct mean_and_variance_weighted freq_stats_weighted;
+ struct bch2_time_stat_buffer __percpu *buffer;
+};
+
+#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
+void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);
+#else
+static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
+#endif
+
+static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
+{
+ __bch2_time_stats_update(stats, start, local_clock());
+}
+
+void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);
+
+void bch2_time_stats_exit(struct bch2_time_stats *);
+void bch2_time_stats_init(struct bch2_time_stats *);
+
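+/*
+ * Integer exponentially weighted moving average; equivalent to
+ * ewma += (val - ewma) / 2^weight. E.g. ewma_add(100, 200, 3) ==
+ * (800 - 100 + 200) >> 3 == 112.
+ */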
+#define ewma_add(ewma, val, weight) \
+({ \
+ typeof(ewma) _ewma = (ewma); \
+ typeof(weight) _weight = (weight); \
+ \
+ (((_ewma << _weight) - _ewma) + (val)) >> _weight; \
+})
+
+struct bch_ratelimit {
+ /* Next time we want to do some work, in nanoseconds */
+ u64 next;
+
+ /*
+	 * Rate at which we want to do work, in units per second.
+ * The units here correspond to the units passed to
+ * bch2_ratelimit_increment()
+ */
+ unsigned rate;
+};
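+
+/*
+ * Typical usage, as a sketch: after doing some work, call
+ * bch2_ratelimit_increment() with the number of units done, then sleep for
+ * bch2_ratelimit_delay() jiffies before doing more.
+ */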
+
+static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
+{
+ d->next = local_clock();
+}
+
+u64 bch2_ratelimit_delay(struct bch_ratelimit *);
+void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
+
+struct bch_pd_controller {
+ struct bch_ratelimit rate;
+ unsigned long last_update;
+
+ s64 last_actual;
+ s64 smoothed_derivative;
+
+ unsigned p_term_inverse;
+ unsigned d_smooth;
+ unsigned d_term;
+
+ /* for exporting to sysfs (no effect on behavior) */
+ s64 last_derivative;
+ s64 last_proportional;
+ s64 last_change;
+ s64 last_target;
+
+ /*
+ * If true, the rate will not increase if bch2_ratelimit_delay()
+ * is not being called often enough.
+ */
+ bool backpressure;
+};
+
+void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
+void bch2_pd_controller_init(struct bch_pd_controller *);
+void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);
+
+#define sysfs_pd_controller_attribute(name) \
+ rw_attribute(name##_rate); \
+ rw_attribute(name##_rate_bytes); \
+ rw_attribute(name##_rate_d_term); \
+ rw_attribute(name##_rate_p_term_inverse); \
+ read_attribute(name##_rate_debug)
+
+#define sysfs_pd_controller_files(name) \
+ &sysfs_##name##_rate, \
+ &sysfs_##name##_rate_bytes, \
+ &sysfs_##name##_rate_d_term, \
+ &sysfs_##name##_rate_p_term_inverse, \
+ &sysfs_##name##_rate_debug
+
+#define sysfs_pd_controller_show(name, var) \
+do { \
+ sysfs_hprint(name##_rate, (var)->rate.rate); \
+ sysfs_print(name##_rate_bytes, (var)->rate.rate); \
+ sysfs_print(name##_rate_d_term, (var)->d_term); \
+ sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \
+ \
+ if (attr == &sysfs_##name##_rate_debug) \
+ bch2_pd_controller_debug_to_text(out, var); \
+} while (0)
+
+#define sysfs_pd_controller_store(name, var) \
+do { \
+ sysfs_strtoul_clamp(name##_rate, \
+ (var)->rate.rate, 1, UINT_MAX); \
+ sysfs_strtoul_clamp(name##_rate_bytes, \
+ (var)->rate.rate, 1, UINT_MAX); \
+ sysfs_strtoul(name##_rate_d_term, (var)->d_term); \
+ sysfs_strtoul_clamp(name##_rate_p_term_inverse, \
+ (var)->p_term_inverse, 1, INT_MAX); \
+} while (0)
+
+#define container_of_or_null(ptr, type, member) \
+({ \
+ typeof(ptr) _ptr = ptr; \
+ _ptr ? container_of(_ptr, type, member) : NULL; \
+})
+
+/* Does linear interpolation between powers of two */
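+/*
+ * E.g. with fract_bits = 4, x = 0x18 represents 1.5: we compute 1 << 1 == 2,
+ * then add (2 * 8) >> 4 == 1, returning 3 (vs. 2^1.5 ~= 2.83).
+ */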
+static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
+{
+ unsigned fract = x & ~(~0 << fract_bits);
+
+ x >>= fract_bits;
+ x = 1 << x;
+ x += (x * fract) >> fract_bits;
+
+ return x;
+}
+
+void bch2_bio_map(struct bio *bio, void *base, size_t);
+int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);
+
+static inline sector_t bdev_sectors(struct block_device *bdev)
+{
+ return bdev->bd_inode->i_size >> 9;
+}
+
+#define closure_bio_submit(bio, cl) \
+do { \
+ closure_get(cl); \
+ submit_bio(bio); \
+} while (0)
+
+#define kthread_wait(cond) \
+({ \
+ int _ret = 0; \
+ \
+ while (1) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (kthread_should_stop()) { \
+ _ret = -1; \
+ break; \
+ } \
+ \
+ if (cond) \
+ break; \
+ \
+ schedule(); \
+ } \
+ set_current_state(TASK_RUNNING); \
+ _ret; \
+})
+
+#define kthread_wait_freezable(cond) \
+({ \
+ int _ret = 0; \
+ while (1) { \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (kthread_should_stop()) { \
+ _ret = -1; \
+ break; \
+ } \
+ \
+ if (cond) \
+ break; \
+ \
+ schedule(); \
+ try_to_freeze(); \
+ } \
+ set_current_state(TASK_RUNNING); \
+ _ret; \
+})
+
+size_t bch2_rand_range(size_t);
+
+void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
+void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
+
+static inline void memcpy_u64s_small(void *dst, const void *src,
+ unsigned u64s)
+{
+ u64 *d = dst;
+ const u64 *s = src;
+
+ while (u64s--)
+ *d++ = *s++;
+}
+
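+/*
+ * Copy whole u64s at a time: rep movsq on x86-64, a plain word loop
+ * elsewhere. Callers guarantee u64 alignment; overlap rules are enforced by
+ * the memcpy_u64s()/memmove_u64s_*() wrappers below.
+ */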
+static inline void __memcpy_u64s(void *dst, const void *src,
+ unsigned u64s)
+{
+#ifdef CONFIG_X86_64
+ long d0, d1, d2;
+
+ asm volatile("rep ; movsq"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (u64s), "1" (dst), "2" (src)
+ : "memory");
+#else
+ u64 *d = dst;
+ const u64 *s = src;
+
+ while (u64s--)
+ *d++ = *s++;
+#endif
+}
+
+static inline void memcpy_u64s(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
+ dst + u64s * sizeof(u64) <= src));
+
+ __memcpy_u64s(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_down(void *dst, const void *src,
+ unsigned u64s)
+{
+ __memcpy_u64s(dst, src, u64s);
+}
+
+static inline void memmove_u64s_down(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(dst > src);
+
+ __memmove_u64s_down(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_down_small(void *dst, const void *src,
+ unsigned u64s)
+{
+ memcpy_u64s_small(dst, src, u64s);
+}
+
+static inline void memmove_u64s_down_small(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(dst > src);
+
+ __memmove_u64s_down_small(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
+ unsigned u64s)
+{
+ u64 *dst = (u64 *) _dst + u64s;
+ u64 *src = (u64 *) _src + u64s;
+
+ while (u64s--)
+ *--dst = *--src;
+}
+
+static inline void memmove_u64s_up_small(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(dst < src);
+
+ __memmove_u64s_up_small(dst, src, u64s);
+}
+
+static inline void __memmove_u64s_up(void *_dst, const void *_src,
+ unsigned u64s)
+{
+ u64 *dst = (u64 *) _dst + u64s - 1;
+ u64 *src = (u64 *) _src + u64s - 1;
+
+#ifdef CONFIG_X86_64
+ long d0, d1, d2;
+
+ asm volatile("std ;\n"
+ "rep ; movsq\n"
+ "cld ;\n"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (u64s), "1" (dst), "2" (src)
+ : "memory");
+#else
+ while (u64s--)
+ *dst-- = *src--;
+#endif
+}
+
+static inline void memmove_u64s_up(void *dst, const void *src,
+ unsigned u64s)
+{
+ EBUG_ON(dst < src);
+
+ __memmove_u64s_up(dst, src, u64s);
+}
+
+static inline void memmove_u64s(void *dst, const void *src,
+ unsigned u64s)
+{
+ if (dst < src)
+ __memmove_u64s_down(dst, src, u64s);
+ else
+ __memmove_u64s_up(dst, src, u64s);
+}
+
+/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
+static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
+{
+ unsigned rem = round_up(bytes, sizeof(u64)) - bytes;
+
+ memset(s + bytes, c, rem);
+}
+
+void sort_cmp_size(void *base, size_t num, size_t size,
+ int (*cmp_func)(const void *, const void *, size_t),
+ void (*swap_func)(void *, void *, size_t));
+
+/* just the memmove, doesn't update @_nr */
+#define __array_insert_item(_array, _nr, _pos) \
+ memmove(&(_array)[(_pos) + 1], \
+ &(_array)[(_pos)], \
+ sizeof((_array)[0]) * ((_nr) - (_pos)))
+
+#define array_insert_item(_array, _nr, _pos, _new_item) \
+do { \
+ __array_insert_item(_array, _nr, _pos); \
+ (_nr)++; \
+ (_array)[(_pos)] = (_new_item); \
+} while (0)
+
+#define array_remove_items(_array, _nr, _pos, _nr_to_remove) \
+do { \
+ (_nr) -= (_nr_to_remove); \
+ memmove(&(_array)[(_pos)], \
+ &(_array)[(_pos) + (_nr_to_remove)], \
+ sizeof((_array)[0]) * ((_nr) - (_pos))); \
+} while (0)
+
+#define array_remove_item(_array, _nr, _pos) \
+ array_remove_items(_array, _nr, _pos, 1)
+
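+/*
+ * Gap buffer helpers: of @size slots, @nr hold live elements, split into a
+ * prefix [0, gap) and a suffix starting at gap + (size - nr). Moving the gap
+ * memmove()s only the elements between the old and new gap positions.
+ */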
+static inline void __move_gap(void *array, size_t element_size,
+ size_t nr, size_t size,
+ size_t old_gap, size_t new_gap)
+{
+ size_t gap_end = old_gap + size - nr;
+
+ if (new_gap < old_gap) {
+ size_t move = old_gap - new_gap;
+
+ memmove(array + element_size * (gap_end - move),
+ array + element_size * (old_gap - move),
+ element_size * move);
+ } else if (new_gap > old_gap) {
+ size_t move = new_gap - old_gap;
+
+ memmove(array + element_size * old_gap,
+ array + element_size * gap_end,
+ element_size * move);
+ }
+}
+
+/* Move the gap in a gap buffer: */
+#define move_gap(_array, _nr, _size, _old_gap, _new_gap) \
+ __move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+
+#define bubble_sort(_base, _nr, _cmp) \
+do { \
+ ssize_t _i, _last; \
+ bool _swapped = true; \
+ \
+	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
+ _swapped = false; \
+ for (_i = 0; _i < _last; _i++) \
+ if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) { \
+ swap((_base)[_i], (_base)[_i + 1]); \
+ _swapped = true; \
+ } \
+ } \
+} while (0)
+
+static inline u64 percpu_u64_get(u64 __percpu *src)
+{
+ u64 ret = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ ret += *per_cpu_ptr(src, cpu);
+ return ret;
+}
+
+static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(dst, cpu) = 0;
+ this_cpu_write(*dst, src);
+}
+
+static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
+{
+ unsigned i;
+
+ for (i = 0; i < nr; i++)
+ acc[i] += src[i];
+}
+
+static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
+ unsigned nr)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
+}
+
+static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ memset(per_cpu_ptr(p, cpu), c, bytes);
+}
+
+u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);
+
+#define cmp_int(l, r)		(((l) > (r)) - ((l) < (r)))
+
+static inline int u8_cmp(u8 l, u8 r)
+{
+ return cmp_int(l, r);
+}
+
+static inline int cmp_le32(__le32 l, __le32 r)
+{
+ return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
+}
+
+#include <linux/uuid.h>
+
+#endif /* _BCACHEFS_UTIL_H */
diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c
new file mode 100644
index 000000000000..cb4f33ed9ab3
--- /dev/null
+++ b/fs/bcachefs/varint.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/math.h>
+#include <linux/string.h>
+#include <asm/unaligned.h>
+
+#ifdef CONFIG_VALGRIND
+#include <valgrind/memcheck.h>
+#endif
+
+#include "varint.h"
+
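+/*
+ * Varint format: the number of consecutive 1 bits in the low bits of the
+ * first byte (before the first 0 bit) gives the number of additional bytes,
+ * and the remaining bits hold the value, little endian; a first byte of 255
+ * means the full 8 byte integer follows.
+ *
+ * Worked example (illustrative): v = 300 needs 9 bits, so 2 bytes total.
+ * Encoding emits (300 << 2) | 1 == 0x4b1, i.e. the bytes 0xb1 0x04; decoding
+ * sees one low 1 bit in 0xb1, reads 2 bytes, and shifts right by 2 to
+ * recover 300.
+ */
+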
+/**
+ * bch2_varint_encode - encode a variable length integer
+ * @out: destination to encode to
+ * @v: unsigned integer to encode
+ * Returns: size in bytes of the encoded integer - at most 9 bytes
+ */
+int bch2_varint_encode(u8 *out, u64 v)
+{
+ unsigned bits = fls64(v|1);
+ unsigned bytes = DIV_ROUND_UP(bits, 7);
+ __le64 v_le;
+
+ if (likely(bytes < 9)) {
+ v <<= bytes;
+ v |= ~(~0 << (bytes - 1));
+ v_le = cpu_to_le64(v);
+ memcpy(out, &v_le, bytes);
+ } else {
+ *out++ = 255;
+ bytes = 9;
+ put_unaligned_le64(v, out);
+ }
+
+ return bytes;
+}
+
+/**
+ * bch2_varint_decode - decode a variable length integer
+ * @in: varint to decode
+ * @end: end of buffer to decode from
+ * @out: on success, decoded integer
+ * Returns: size in bytes of the decoded integer - or -1 on failure (would
+ * have read past the end of the buffer)
+ */
+int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
+{
+ unsigned bytes = likely(in < end)
+ ? ffz(*in & 255) + 1
+ : 1;
+ u64 v;
+
+ if (unlikely(in + bytes > end))
+ return -1;
+
+ if (likely(bytes < 9)) {
+ __le64 v_le = 0;
+
+ memcpy(&v_le, in, bytes);
+ v = le64_to_cpu(v_le);
+ v >>= bytes;
+ } else {
+ v = get_unaligned_le64(++in);
+ }
+
+ *out = v;
+ return bytes;
+}
+
+/**
+ * bch2_varint_encode_fast - fast version of bch2_varint_encode
+ * @out: destination to encode to
+ * @v: unsigned integer to encode
+ * Returns: size in bytes of the encoded integer - at most 9 bytes
+ *
+ * This version assumes it's always safe to write 8 bytes to @out, even if the
+ * encoded integer would be smaller.
+ */
+int bch2_varint_encode_fast(u8 *out, u64 v)
+{
+ unsigned bits = fls64(v|1);
+ unsigned bytes = DIV_ROUND_UP(bits, 7);
+
+ if (likely(bytes < 9)) {
+ v <<= bytes;
+ v |= ~(~0 << (bytes - 1));
+ } else {
+ *out++ = 255;
+ bytes = 9;
+ }
+
+ put_unaligned_le64(v, out);
+ return bytes;
+}
+
+/**
+ * bch2_varint_decode_fast - fast version of bch2_varint_decode
+ * @in: varint to decode
+ * @end: end of buffer to decode from
+ * @out: on success, decoded integer
+ * Returns: size in bytes of the decoded integer - or -1 on failure (would
+ * have read past the end of the buffer)
+ *
+ * This version assumes that it is safe to read at most 8 bytes past @end
+ * (we still return an error if the varint extends past @end).
+ */
+int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
+{
+#ifdef CONFIG_VALGRIND
+ VALGRIND_MAKE_MEM_DEFINED(in, 8);
+#endif
+ u64 v = get_unaligned_le64(in);
+ unsigned bytes = ffz(*in) + 1;
+
+ if (unlikely(in + bytes > end))
+ return -1;
+
+ if (likely(bytes < 9)) {
+ v >>= bytes;
+ v &= ~(~0ULL << (7 * bytes));
+ } else {
+ v = get_unaligned_le64(++in);
+ }
+
+ *out = v;
+ return bytes;
+}
diff --git a/fs/bcachefs/varint.h b/fs/bcachefs/varint.h
new file mode 100644
index 000000000000..92a182fb3d7a
--- /dev/null
+++ b/fs/bcachefs/varint.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_VARINT_H
+#define _BCACHEFS_VARINT_H
+
+int bch2_varint_encode(u8 *, u64);
+int bch2_varint_decode(const u8 *, const u8 *, u64 *);
+
+int bch2_varint_encode_fast(u8 *, u64);
+int bch2_varint_decode_fast(const u8 *, const u8 *, u64 *);
+
+#endif /* _BCACHEFS_VARINT_H */
diff --git a/fs/bcachefs/vstructs.h b/fs/bcachefs/vstructs.h
new file mode 100644
index 000000000000..a6561b4b36a6
--- /dev/null
+++ b/fs/bcachefs/vstructs.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VSTRUCTS_H
+#define _VSTRUCTS_H
+
+#include "util.h"
+
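+/*
+ * Helpers for variable length structs: structs with a fixed header followed
+ * by a payload whose length is given by a u64s field counting 64 bit words,
+ * stored as the struct's _data member.
+ */
+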
+/*
+ * NOTE: we can't differentiate between __le64 and u64 with type_is - this
+ * assumes u64 is little endian:
+ */
+#define __vstruct_u64s(_s) \
+({ \
+ ( type_is((_s)->u64s, u64) ? le64_to_cpu((__force __le64) (_s)->u64s) \
+ : type_is((_s)->u64s, u32) ? le32_to_cpu((__force __le32) (_s)->u64s) \
+ : type_is((_s)->u64s, u16) ? le16_to_cpu((__force __le16) (_s)->u64s) \
+ : ((__force u8) ((_s)->u64s))); \
+})
+
+#define __vstruct_bytes(_type, _u64s) \
+({ \
+ BUILD_BUG_ON(offsetof(_type, _data) % sizeof(u64)); \
+ \
+ (size_t) (offsetof(_type, _data) + (_u64s) * sizeof(u64)); \
+})
+
+#define vstruct_bytes(_s) \
+ __vstruct_bytes(typeof(*(_s)), __vstruct_u64s(_s))
+
+#define __vstruct_blocks(_type, _sector_block_bits, _u64s) \
+ (round_up(__vstruct_bytes(_type, _u64s), \
+ 512 << (_sector_block_bits)) >> (9 + (_sector_block_bits)))
+
+#define vstruct_blocks(_s, _sector_block_bits) \
+ __vstruct_blocks(typeof(*(_s)), _sector_block_bits, __vstruct_u64s(_s))
+
+#define vstruct_blocks_plus(_s, _sector_block_bits, _u64s) \
+ __vstruct_blocks(typeof(*(_s)), _sector_block_bits, \
+ __vstruct_u64s(_s) + (_u64s))
+
+#define vstruct_sectors(_s, _sector_block_bits) \
+ (round_up(vstruct_bytes(_s), 512 << (_sector_block_bits)) >> 9)
+
+#define vstruct_next(_s) \
+ ((typeof(_s)) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
+#define vstruct_last(_s) \
+ ((typeof(&(_s)->start[0])) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
+#define vstruct_end(_s) \
+ ((void *) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))
+
+#define vstruct_for_each(_s, _i) \
+ for (_i = (_s)->start; \
+ _i < vstruct_last(_s); \
+ _i = vstruct_next(_i))
+
+#define vstruct_for_each_safe(_s, _i, _t) \
+ for (_i = (_s)->start; \
+ _i < vstruct_last(_s) && (_t = vstruct_next(_i), true); \
+ _i = _t)
+
+#define vstruct_idx(_s, _idx) \
+ ((typeof(&(_s)->start[0])) ((_s)->_data + (_idx)))
+
+#endif /* _VSTRUCTS_H */
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
new file mode 100644
index 000000000000..b069b1a62e25
--- /dev/null
+++ b/fs/bcachefs/xattr.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "acl.h"
+#include "bkey_methods.h"
+#include "btree_update.h"
+#include "extents.h"
+#include "fs.h"
+#include "rebalance.h"
+#include "str_hash.h"
+#include "xattr.h"
+
+#include <linux/dcache.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned);
+
+static u64 bch2_xattr_hash(const struct bch_hash_info *info,
+ const struct xattr_search_key *key)
+{
+ struct bch_str_hash_ctx ctx;
+
+ bch2_str_hash_init(&ctx, info);
+ bch2_str_hash_update(&ctx, info, &key->type, sizeof(key->type));
+ bch2_str_hash_update(&ctx, info, key->name.name, key->name.len);
+
+ return bch2_str_hash_end(&ctx, info);
+}
+
+static u64 xattr_hash_key(const struct bch_hash_info *info, const void *key)
+{
+ return bch2_xattr_hash(info, key);
+}
+
+static u64 xattr_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
+{
+ struct bkey_s_c_xattr x = bkey_s_c_to_xattr(k);
+
+ return bch2_xattr_hash(info,
+ &X_SEARCH(x.v->x_type, x.v->x_name, x.v->x_name_len));
+}
+
+static bool xattr_cmp_key(struct bkey_s_c _l, const void *_r)
+{
+ struct bkey_s_c_xattr l = bkey_s_c_to_xattr(_l);
+ const struct xattr_search_key *r = _r;
+
+ return l.v->x_type != r->type ||
+ l.v->x_name_len != r->name.len ||
+ memcmp(l.v->x_name, r->name.name, r->name.len);
+}
+
+static bool xattr_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
+{
+ struct bkey_s_c_xattr l = bkey_s_c_to_xattr(_l);
+ struct bkey_s_c_xattr r = bkey_s_c_to_xattr(_r);
+
+ return l.v->x_type != r.v->x_type ||
+ l.v->x_name_len != r.v->x_name_len ||
+ memcmp(l.v->x_name, r.v->x_name, r.v->x_name_len);
+}
+
+const struct bch_hash_desc bch2_xattr_hash_desc = {
+ .btree_id = BTREE_ID_xattrs,
+ .key_type = KEY_TYPE_xattr,
+ .hash_key = xattr_hash_key,
+ .hash_bkey = xattr_hash_bkey,
+ .cmp_key = xattr_cmp_key,
+ .cmp_bkey = xattr_cmp_bkey,
+};
+
+int bch2_xattr_invalid(const struct bch_fs *c, struct bkey_s_c k,
+ enum bkey_invalid_flags flags,
+ struct printbuf *err)
+{
+ const struct xattr_handler *handler;
+ struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
+
+ if (bkey_val_u64s(k.k) <
+ xattr_val_u64s(xattr.v->x_name_len,
+ le16_to_cpu(xattr.v->x_val_len))) {
+ prt_printf(err, "value too small (%zu < %u)",
+ bkey_val_u64s(k.k),
+ xattr_val_u64s(xattr.v->x_name_len,
+ le16_to_cpu(xattr.v->x_val_len)));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ /* XXX why +4 ? */
+ if (bkey_val_u64s(k.k) >
+ xattr_val_u64s(xattr.v->x_name_len,
+ le16_to_cpu(xattr.v->x_val_len) + 4)) {
+ prt_printf(err, "value too big (%zu > %u)",
+ bkey_val_u64s(k.k),
+ xattr_val_u64s(xattr.v->x_name_len,
+ le16_to_cpu(xattr.v->x_val_len) + 4));
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ handler = bch2_xattr_type_to_handler(xattr.v->x_type);
+ if (!handler) {
+ prt_printf(err, "invalid type (%u)", xattr.v->x_type);
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ if (memchr(xattr.v->x_name, '\0', xattr.v->x_name_len)) {
+ prt_printf(err, "xattr name has invalid characters");
+ return -BCH_ERR_invalid_bkey;
+ }
+
+ return 0;
+}
+
+void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ const struct xattr_handler *handler;
+ struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
+
+ handler = bch2_xattr_type_to_handler(xattr.v->x_type);
+ if (handler && handler->prefix)
+ prt_printf(out, "%s", handler->prefix);
+ else if (handler)
+ prt_printf(out, "(type %u)", xattr.v->x_type);
+ else
+ prt_printf(out, "(unknown type %u)", xattr.v->x_type);
+
+ prt_printf(out, "%.*s:%.*s",
+ xattr.v->x_name_len,
+ xattr.v->x_name,
+ le16_to_cpu(xattr.v->x_val_len),
+ (char *) xattr_val(xattr.v));
+
+ if (xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS ||
+ xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT) {
+ prt_char(out, ' ');
+ bch2_acl_to_text(out, xattr_val(xattr.v),
+ le16_to_cpu(xattr.v->x_val_len));
+ }
+}
+
+static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info *inode,
+ const char *name, void *buffer, size_t size, int type)
+{
+ struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
+ struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
+ struct btree_iter iter;
+ struct bkey_s_c_xattr xattr;
+ struct bkey_s_c k;
+ int ret;
+
+ ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
+ inode_inum(inode), &search, 0);
+ if (ret)
+ goto err1;
+
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret)
+ goto err2;
+
+ xattr = bkey_s_c_to_xattr(k);
+ ret = le16_to_cpu(xattr.v->x_val_len);
+ if (buffer) {
+ if (ret > size)
+ ret = -ERANGE;
+ else
+ memcpy(buffer, xattr_val(xattr.v), ret);
+ }
+err2:
+ bch2_trans_iter_exit(trans, &iter);
+err1:
+ return ret < 0 && bch2_err_matches(ret, ENOENT) ? -ENODATA : ret;
+}
+
+int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
+ struct bch_inode_unpacked *inode_u,
+ const struct bch_hash_info *hash_info,
+ const char *name, const void *value, size_t size,
+ int type, int flags)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter inode_iter = { NULL };
+ int ret;
+
+ ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
+ if (ret)
+ return ret;
+
+ inode_u->bi_ctime = bch2_current_time(c);
+
+ ret = bch2_inode_write(trans, &inode_iter, inode_u);
+ bch2_trans_iter_exit(trans, &inode_iter);
+
+ if (ret)
+ return ret;
+
+ if (value) {
+ struct bkey_i_xattr *xattr;
+ unsigned namelen = strlen(name);
+ unsigned u64s = BKEY_U64s +
+ xattr_val_u64s(namelen, size);
+
+ if (u64s > U8_MAX)
+ return -ERANGE;
+
+ xattr = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
+ if (IS_ERR(xattr))
+ return PTR_ERR(xattr);
+
+ bkey_xattr_init(&xattr->k_i);
+ xattr->k.u64s = u64s;
+ xattr->v.x_type = type;
+ xattr->v.x_name_len = namelen;
+ xattr->v.x_val_len = cpu_to_le16(size);
+ memcpy(xattr->v.x_name, name, namelen);
+ memcpy(xattr_val(&xattr->v), value, size);
+
+ ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
+ inum, &xattr->k_i,
+ (flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
+ (flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));
+ } else {
+ struct xattr_search_key search =
+ X_SEARCH(type, name, strlen(name));
+
+ ret = bch2_hash_delete(trans, bch2_xattr_hash_desc,
+ hash_info, inum, &search);
+ }
+
+ if (bch2_err_matches(ret, ENOENT))
+ ret = flags & XATTR_REPLACE ? -ENODATA : 0;
+
+ return ret;
+}
+
+struct xattr_buf {
+ char *buf;
+ size_t len;
+ size_t used;
+};
+
+static int __bch2_xattr_emit(const char *prefix,
+ const char *name, size_t name_len,
+ struct xattr_buf *buf)
+{
+ const size_t prefix_len = strlen(prefix);
+ const size_t total_len = prefix_len + name_len + 1;
+
+ if (buf->buf) {
+ if (buf->used + total_len > buf->len)
+ return -ERANGE;
+
+ memcpy(buf->buf + buf->used, prefix, prefix_len);
+ memcpy(buf->buf + buf->used + prefix_len,
+ name, name_len);
+ buf->buf[buf->used + prefix_len + name_len] = '\0';
+ }
+
+ buf->used += total_len;
+ return 0;
+}
+
+static int bch2_xattr_emit(struct dentry *dentry,
+ const struct bch_xattr *xattr,
+ struct xattr_buf *buf)
+{
+ const struct xattr_handler *handler =
+ bch2_xattr_type_to_handler(xattr->x_type);
+
+ return handler && (!handler->list || handler->list(dentry))
+ ? __bch2_xattr_emit(handler->prefix ?: handler->name,
+ xattr->x_name, xattr->x_name_len, buf)
+ : 0;
+}
+
+static int bch2_xattr_list_bcachefs(struct bch_fs *c,
+ struct bch_inode_unpacked *inode,
+ struct xattr_buf *buf,
+ bool all)
+{
+ const char *prefix = all ? "bcachefs_effective." : "bcachefs.";
+ unsigned id;
+ int ret = 0;
+ u64 v;
+
+ for (id = 0; id < Inode_opt_nr; id++) {
+ v = bch2_inode_opt_get(inode, id);
+ if (!v)
+ continue;
+
+ if (!all &&
+ !(inode->bi_fields_set & (1 << id)))
+ continue;
+
+ ret = __bch2_xattr_emit(prefix, bch2_inode_opts[id],
+ strlen(bch2_inode_opts[id]), buf);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+{
+ struct bch_fs *c = dentry->d_sb->s_fs_info;
+ struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct xattr_buf buf = { .buf = buffer, .len = buffer_size };
+ u64 offset = 0, inum = inode->ei_inode.bi_inum;
+ u32 snapshot;
+ int ret;
+retry:
+ bch2_trans_begin(trans);
+ iter = (struct btree_iter) { NULL };
+
+ ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot);
+ if (ret)
+ goto err;
+
+ for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_xattrs,
+ SPOS(inum, offset, snapshot),
+ POS(inum, U64_MAX), 0, k, ret) {
+ if (k.k->type != KEY_TYPE_xattr)
+ continue;
+
+ ret = bch2_xattr_emit(dentry, bkey_s_c_to_xattr(k).v, &buf);
+ if (ret)
+ break;
+ }
+
+ offset = iter.pos.offset;
+ bch2_trans_iter_exit(trans, &iter);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ bch2_trans_put(trans);
+
+ if (ret)
+ goto out;
+
+ ret = bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, false);
+ if (ret)
+ goto out;
+
+ ret = bch2_xattr_list_bcachefs(c, &inode->ei_inode, &buf, true);
+ if (ret)
+ goto out;
+
+ return buf.used;
+out:
+ return bch2_err_class(ret);
+}
+
+static int bch2_xattr_get_handler(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *vinode,
+ const char *name, void *buffer, size_t size)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ int ret = bch2_trans_do(c, NULL, NULL, 0,
+ bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags));
+
+ return bch2_err_class(ret);
+}
+
+static int bch2_xattr_set_handler(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry, struct inode *vinode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
+ struct bch_inode_unpacked inode_u;
+ int ret;
+
+ ret = bch2_trans_run(c,
+ commit_do(trans, NULL, NULL, 0,
+ bch2_xattr_set(trans, inode_inum(inode), &inode_u,
+ &hash, name, value, size,
+ handler->flags, flags)) ?:
+ (bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0));
+
+ return bch2_err_class(ret);
+}
+
+static const struct xattr_handler bch_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .get = bch2_xattr_get_handler,
+ .set = bch2_xattr_set_handler,
+ .flags = KEY_TYPE_XATTR_INDEX_USER,
+};
+
+static bool bch2_xattr_trusted_list(struct dentry *dentry)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+static const struct xattr_handler bch_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .list = bch2_xattr_trusted_list,
+ .get = bch2_xattr_get_handler,
+ .set = bch2_xattr_set_handler,
+ .flags = KEY_TYPE_XATTR_INDEX_TRUSTED,
+};
+
+static const struct xattr_handler bch_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .get = bch2_xattr_get_handler,
+ .set = bch2_xattr_set_handler,
+ .flags = KEY_TYPE_XATTR_INDEX_SECURITY,
+};
+
+#ifndef NO_BCACHEFS_FS
+
+static int opt_to_inode_opt(int id)
+{
+ switch (id) {
+#define x(name, ...) \
+ case Opt_##name: return Inode_opt_##name;
+ BCH_INODE_OPTS()
+#undef x
+ default:
+ return -1;
+ }
+}
+
+static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *vinode,
+ const char *name, void *buffer, size_t size,
+ bool all)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ struct bch_opts opts =
+ bch2_inode_opts_to_opts(&inode->ei_inode);
+ const struct bch_option *opt;
+ int id, inode_opt_id;
+ struct printbuf out = PRINTBUF;
+ int ret;
+ u64 v;
+
+ id = bch2_opt_lookup(name);
+ if (id < 0 || !bch2_opt_is_inode_opt(id))
+ return -EINVAL;
+
+ inode_opt_id = opt_to_inode_opt(id);
+ if (inode_opt_id < 0)
+ return -EINVAL;
+
+ opt = bch2_opt_table + id;
+
+ if (!bch2_opt_defined_by_id(&opts, id))
+ return -ENODATA;
+
+ if (!all &&
+ !(inode->ei_inode.bi_fields_set & (1 << inode_opt_id)))
+ return -ENODATA;
+
+ v = bch2_opt_get_by_id(&opts, id);
+ bch2_opt_to_text(&out, c, c->disk_sb.sb, opt, v, 0);
+
+ ret = out.pos;
+
+ if (out.allocation_failure) {
+ ret = -ENOMEM;
+ } else if (buffer) {
+ if (out.pos > size)
+ ret = -ERANGE;
+ else
+ memcpy(buffer, out.buf, out.pos);
+ }
+
+ printbuf_exit(&out);
+ return ret;
+}
+
+static int bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *vinode,
+ const char *name, void *buffer, size_t size)
+{
+ return __bch2_xattr_bcachefs_get(handler, dentry, vinode,
+ name, buffer, size, false);
+}
+
+struct inode_opt_set {
+ int id;
+ u64 v;
+ bool defined;
+};
+
+static int inode_opt_set_fn(struct btree_trans *trans,
+ struct bch_inode_info *inode,
+ struct bch_inode_unpacked *bi,
+ void *p)
+{
+ struct inode_opt_set *s = p;
+
+ if (s->defined)
+ bi->bi_fields_set |= 1U << s->id;
+ else
+ bi->bi_fields_set &= ~(1U << s->id);
+
+ bch2_inode_opt_set(bi, s->id, s->v);
+
+ return 0;
+}
+
+static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry, struct inode *vinode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct bch_inode_info *inode = to_bch_ei(vinode);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
+ const struct bch_option *opt;
+ char *buf;
+ struct inode_opt_set s;
+ int opt_id, inode_opt_id, ret;
+
+ opt_id = bch2_opt_lookup(name);
+ if (opt_id < 0)
+ return -EINVAL;
+
+ opt = bch2_opt_table + opt_id;
+
+ inode_opt_id = opt_to_inode_opt(opt_id);
+ if (inode_opt_id < 0)
+ return -EINVAL;
+
+ s.id = inode_opt_id;
+
+ if (value) {
+ u64 v = 0;
+
+ buf = kmalloc(size + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memcpy(buf, value, size);
+ buf[size] = '\0';
+
+ ret = bch2_opt_parse(c, opt, buf, &v, NULL);
+ kfree(buf);
+
+ if (ret < 0)
+ return ret;
+
+ ret = bch2_opt_check_may_set(c, opt_id, v);
+ if (ret < 0)
+ return ret;
+
+ s.v = v + 1;
+ s.defined = true;
+ } else {
+ if (!IS_ROOT(dentry)) {
+ struct bch_inode_info *dir =
+ to_bch_ei(d_inode(dentry->d_parent));
+
+ s.v = bch2_inode_opt_get(&dir->ei_inode, inode_opt_id);
+ } else {
+ s.v = 0;
+ }
+
+ s.defined = false;
+ }
+
+ mutex_lock(&inode->ei_update_lock);
+ if (inode_opt_id == Inode_opt_project) {
+ /*
+ * inode fields accessible via the xattr interface are stored
+ * with a +1 bias, so that 0 means unset:
+ */
+ ret = bch2_set_projid(c, inode, s.v ? s.v - 1 : 0);
+ if (ret)
+ goto err;
+ }
+
+ ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0);
+err:
+ mutex_unlock(&inode->ei_update_lock);
+
+ if (value &&
+ (opt_id == Opt_background_compression ||
+ opt_id == Opt_background_target))
+ bch2_rebalance_add_work(c, inode->v.i_blocks);
+
+ return bch2_err_class(ret);
+}
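To make the +1 bias concrete: the value parsed from the xattr is stored as v + 1, and a stored 0 always means "unset" (which is why the project-ID path above writes s.v ? s.v - 1 : 0). A minimal sketch of the encode/decode pair, with invented helper names:

	/* Store: user value v becomes v + 1, reserving 0 for "unset". */
	static inline unsigned long long inode_opt_encode(unsigned long long v)
	{
		return v + 1;
	}

	/* Load: 0 means unset, anything else decodes back to v - 1. */
	static inline int inode_opt_decode(unsigned long long stored,
					   unsigned long long *v)
	{
		if (!stored)
			return -1;	/* option not set on this inode */
		*v = stored - 1;
		return 0;
	}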
+
+static const struct xattr_handler bch_xattr_bcachefs_handler = {
+ .prefix = "bcachefs.",
+ .get = bch2_xattr_bcachefs_get,
+ .set = bch2_xattr_bcachefs_set,
+};
+
+static int bch2_xattr_bcachefs_get_effective(
+ const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *vinode,
+ const char *name, void *buffer, size_t size)
+{
+ return __bch2_xattr_bcachefs_get(handler, dentry, vinode,
+ name, buffer, size, true);
+}
+
+static const struct xattr_handler bch_xattr_bcachefs_effective_handler = {
+ .prefix = "bcachefs_effective.",
+ .get = bch2_xattr_bcachefs_get_effective,
+ .set = bch2_xattr_bcachefs_set,
+};
+
+#endif /* NO_BCACHEFS_FS */
+
+const struct xattr_handler *bch2_xattr_handlers[] = {
+ &bch_xattr_user_handler,
+#ifdef CONFIG_BCACHEFS_POSIX_ACL
+ &nop_posix_acl_access,
+ &nop_posix_acl_default,
+#endif
+ &bch_xattr_trusted_handler,
+ &bch_xattr_security_handler,
+#ifndef NO_BCACHEFS_FS
+ &bch_xattr_bcachefs_handler,
+ &bch_xattr_bcachefs_effective_handler,
+#endif
+ NULL
+};
+
+static const struct xattr_handler *bch_xattr_handler_map[] = {
+ [KEY_TYPE_XATTR_INDEX_USER] = &bch_xattr_user_handler,
+ [KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS] =
+ &nop_posix_acl_access,
+ [KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+ &nop_posix_acl_default,
+ [KEY_TYPE_XATTR_INDEX_TRUSTED] = &bch_xattr_trusted_handler,
+ [KEY_TYPE_XATTR_INDEX_SECURITY] = &bch_xattr_security_handler,
+};
+
+static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned type)
+{
+ return type < ARRAY_SIZE(bch_xattr_handler_map)
+ ? bch_xattr_handler_map[type]
+ : NULL;
+}
diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h
new file mode 100644
index 000000000000..f5a52e3a6016
--- /dev/null
+++ b/fs/bcachefs/xattr.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_XATTR_H
+#define _BCACHEFS_XATTR_H
+
+#include "str_hash.h"
+
+extern const struct bch_hash_desc bch2_xattr_hash_desc;
+
+int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c,
+ enum bkey_invalid_flags, struct printbuf *);
+void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_xattr ((struct bkey_ops) { \
+ .key_invalid = bch2_xattr_invalid, \
+ .val_to_text = bch2_xattr_to_text, \
+ .min_val_size = 8, \
+})
+
+static inline unsigned xattr_val_u64s(unsigned name_len, unsigned val_len)
+{
+ return DIV_ROUND_UP(offsetof(struct bch_xattr, x_name) +
+ name_len + val_len, sizeof(u64));
+}
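xattr_val_u64s() sizes the on-disk value in u64 units: the fixed header up to x_name, plus the name and value bytes, rounded up to a multiple of 8. Assuming an 8-byte header offset (the exact offsetof depends on struct bch_xattr's layout), a 4-byte name with a 5-byte value needs DIV_ROUND_UP(8 + 4 + 5, 8) = 3 u64s, i.e. 24 bytes. A standalone version of the arithmetic:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define HDR_BYTES		8	/* assumed offsetof(struct bch_xattr, x_name) */

	static unsigned example_xattr_val_u64s(unsigned name_len, unsigned val_len)
	{
		return DIV_ROUND_UP(HDR_BYTES + name_len + val_len, 8);
	}

	int main(void)
	{
		printf("%u\n", example_xattr_val_u64s(4, 5));	/* prints 3 */
		return 0;
	}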
+
+#define xattr_val(_xattr) \
+ ((void *) (_xattr)->x_name + (_xattr)->x_name_len)
+
+struct xattr_search_key {
+ u8 type;
+ struct qstr name;
+};
+
+#define X_SEARCH(_type, _name, _len) ((struct xattr_search_key) \
+ { .type = _type, .name = QSTR_INIT(_name, _len) })
+
+struct dentry;
+struct xattr_handler;
+struct bch_hash_info;
+struct bch_inode_info;
+
+/* Exported for cmd_migrate.c in tools: */
+int bch2_xattr_set(struct btree_trans *, subvol_inum,
+ struct bch_inode_unpacked *, const struct bch_hash_info *,
+ const char *, const void *, size_t, int, int);
+
+ssize_t bch2_xattr_list(struct dentry *, char *, size_t);
+
+extern const struct xattr_handler *bch2_xattr_handlers[];
+
+#endif /* _BCACHEFS_XATTR_H */
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 9a16a51fbb88..9acdec56f626 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -360,11 +360,11 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
* for indexing purposes. (PFD, page 54)
*/
- inode->i_mtime.tv_sec =
- fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16;
- inode->i_mtime.tv_nsec = 0; /* lower 16 bits are not a time */
- inode_set_ctime_to_ts(inode, inode->i_mtime);
- inode->i_atime = inode->i_mtime;
+ inode_set_mtime(inode,
+ fs64_to_cpu(sb, raw_inode->last_modified_time) >> 16,
+ 0); /* lower 16 bits are not a time */
+ inode_set_ctime_to_ts(inode, inode_get_mtime(inode));
+ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
befs_ino->i_inode_num = fsrun_to_cpu(sb, raw_inode->inode_num);
befs_ino->i_parent = fsrun_to_cpu(sb, raw_inode->parent);
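The befs conversion keeps the old semantics: last_modified_time is a 64-bit value whose upper 48 bits hold seconds and whose low 16 bits are used for indexing rather than timekeeping (per the comment above), so the seconds are extracted with >> 16 and the nanoseconds zeroed. The unpacking, as a small standalone helper:

	#include <stdint.h>
	#include <time.h>

	/* befs: seconds live in the top 48 bits; the low 16 bits are not a time. */
	static struct timespec befs_time_to_timespec(uint64_t last_modified_time)
	{
		struct timespec ts = {
			.tv_sec  = (time_t)(last_modified_time >> 16),
			.tv_nsec = 0,	/* indexing counter is discarded */
		};
		return ts;
	}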
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 12b8af04dcb3..fbc4ae80a4b2 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -97,7 +97,7 @@ static int bfs_create(struct mnt_idmap *idmap, struct inode *dir,
set_bit(ino, info->si_imap);
info->si_freei--;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
inode->i_op = &bfs_file_inops;
inode->i_fop = &bfs_file_operations;
@@ -187,7 +187,7 @@ static int bfs_unlink(struct inode *dir, struct dentry *dentry)
}
de->ino = 0;
mark_buffer_dirty_inode(bh, dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
inode_dec_link_count(inode);
@@ -240,7 +240,7 @@ static int bfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
goto end_rename;
}
old_de->ino = 0;
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
mark_inode_dirty(old_dir);
if (new_inode) {
inode_set_ctime_current(new_inode);
@@ -294,7 +294,8 @@ static int bfs_add_entry(struct inode *dir, const struct qstr *child, int ino)
dir->i_size += BFS_DIRENT_SIZE;
inode_set_ctime_current(dir);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
mark_inode_dirty(dir);
de->ino = cpu_to_le16((u16)ino);
for (i = 0; i < BFS_NAMELEN; i++)
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index e6a76ae9eb44..355957dbce39 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -80,11 +80,9 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
set_nlink(inode, le32_to_cpu(di->i_nlink));
inode->i_size = BFS_FILESIZE(di);
inode->i_blocks = BFS_FILEBLOCKS(di);
- inode->i_atime.tv_sec = le32_to_cpu(di->i_atime);
- inode->i_mtime.tv_sec = le32_to_cpu(di->i_mtime);
+ inode_set_atime(inode, le32_to_cpu(di->i_atime), 0);
+ inode_set_mtime(inode, le32_to_cpu(di->i_mtime), 0);
inode_set_ctime(inode, le32_to_cpu(di->i_ctime), 0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
brelse(bh);
unlock_new_inode(inode);
@@ -140,9 +138,9 @@ static int bfs_write_inode(struct inode *inode, struct writeback_control *wbc)
di->i_uid = cpu_to_le32(i_uid_read(inode));
di->i_gid = cpu_to_le32(i_gid_read(inode));
di->i_nlink = cpu_to_le32(inode->i_nlink);
- di->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- di->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
- di->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec);
+ di->i_atime = cpu_to_le32(inode_get_atime_sec(inode));
+ di->i_mtime = cpu_to_le32(inode_get_mtime_sec(inode));
+ di->i_ctime = cpu_to_le32(inode_get_ctime_sec(inode));
i_sblock = BFS_I(inode)->i_sblock;
di->i_sblock = cpu_to_le32(i_sblock);
di->i_eblock = cpu_to_le32(BFS_I(inode)->i_eblock);
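These bfs hunks follow the tree-wide 6.7 conversion from poking inode->i_atime/i_mtime fields directly to going through accessor helpers (inode_set_atime(), inode_get_mtime_sec(), simple_inode_init_ts(), ...), so the VFS can later repack its timestamp storage without touching every filesystem. A userspace mock of why the accessor indirection pays off (all names here are illustrative, not kernel API):

	#include <stdint.h>

	/*
	 * Callers only use get/set, so the representation below could be
	 * swapped (e.g. for a packed sec/nsec encoding) without touching them.
	 */
	struct mock_inode {
		int64_t		mtime_sec;
		uint32_t	mtime_nsec;
	};

	static inline void mock_set_mtime(struct mock_inode *in, int64_t sec,
					  uint32_t nsec)
	{
		in->mtime_sec = sec;
		in->mtime_nsec = nsec;
	}

	static inline int64_t mock_get_mtime_sec(const struct mock_inode *in)
	{
		return in->mtime_sec;
	}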
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index e0108d17b085..5d2be9b0a0a5 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -547,7 +547,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index a25c9910d90b..4fb925e8c981 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -48,27 +48,6 @@ config BTRFS_FS_POSIX_ACL
If you don't know what Access Control Lists are, say N
-config BTRFS_FS_CHECK_INTEGRITY
- bool "Btrfs with integrity check tool compiled in (DEPRECATED)"
- depends on BTRFS_FS
- help
- This feature has been deprecated and will be removed in 6.7.
-
- Adds code that examines all block write requests (including
- writes of the super block). The goal is to verify that the
- state of the filesystem on disk is always consistent, i.e.,
- after a power-loss or kernel panic event the filesystem is
- in a consistent state.
-
- If the integrity check tool is included and activated in
- the mount options, plenty of kernel memory is used, and
- plenty of additional CPU cycles are spent. Enabling this
- functionality is not intended for normal use.
-
- In most cases, unless you are a btrfs developer who needs
- to verify the integrity of (super)-block write requests
- during the run of a regression test, say N
-
config BTRFS_FS_RUN_SANITY_TESTS
bool "Btrfs will run sanity tests upon loading"
depends on BTRFS_FS
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 90d53209755b..525af975f61c 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -33,10 +33,9 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
subpage.o tree-mod-log.o extent-io-tree.o fs.o messages.o bio.o \
- lru_cache.o
+ lru_cache.o raid-stripe-tree.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
-btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
btrfs-$(CONFIG_BLK_DEV_ZONED) += zoned.o
btrfs-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/btrfs/accessors.h b/fs/btrfs/accessors.h
index 8cfc8214109c..aa0844535644 100644
--- a/fs/btrfs/accessors.h
+++ b/fs/btrfs/accessors.h
@@ -4,6 +4,7 @@
#define BTRFS_ACCESSORS_H
#include <linux/stddef.h>
+#include <asm/unaligned.h>
struct btrfs_map_token {
struct extent_buffer *eb;
@@ -305,6 +306,14 @@ BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+BTRFS_SETGET_FUNCS(stripe_extent_encoding, struct btrfs_stripe_extent, encoding, 8);
+BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
+BTRFS_SETGET_FUNCS(raid_stride_physical, struct btrfs_raid_stride, physical, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_stripe_extent_encoding,
+ struct btrfs_stripe_extent, encoding, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_devid, struct btrfs_raid_stride, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_physical, struct btrfs_raid_stride, physical, 64);
+
/* struct btrfs_dev_extent */
BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, chunk_tree, 64);
BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
@@ -349,6 +358,9 @@ BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, count, 3
BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, count, 32);
+BTRFS_SETGET_FUNCS(extent_owner_ref_root_id, struct btrfs_extent_owner_ref,
+ root_id, 64);
+
BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref,
type, 8);
BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref,
@@ -365,6 +377,8 @@ static inline u32 btrfs_extent_inline_ref_size(int type)
if (type == BTRFS_EXTENT_DATA_REF_KEY)
return sizeof(struct btrfs_extent_data_ref) +
offsetof(struct btrfs_extent_inline_ref, offset);
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY)
+ return sizeof(struct btrfs_extent_inline_ref);
return 0;
}
@@ -966,6 +980,8 @@ BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
flags, 64);
BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
rescan, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_enable_gen, struct btrfs_qgroup_status_item,
+ enable_gen, 64);
/* btrfs_qgroup_info_item */
BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index ce083e99ef68..9e261aac671e 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
+#include <trace/events/btrfs.h>
#include "async-thread.h"
#include "ctree.h"
@@ -242,7 +243,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
break;
trace_btrfs_ordered_sched(work);
spin_unlock_irqrestore(lock, flags);
- work->ordered_func(work);
+ work->ordered_func(work, false);
/* now take the lock again and drop our item from the list */
spin_lock_irqsave(lock, flags);
@@ -277,7 +278,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
* We don't want to call the ordered free functions with
* the lock held.
*/
- work->ordered_free(work);
+ work->ordered_func(work, true);
/* NB: work must not be dereferenced past this point. */
trace_btrfs_all_work_done(wq->fs_info, work);
}
@@ -285,7 +286,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
spin_unlock_irqrestore(lock, flags);
if (free_self) {
- self->ordered_free(self);
+ self->ordered_func(self, true);
/* NB: self must not be dereferenced past this point. */
trace_btrfs_all_work_done(wq->fs_info, self);
}
@@ -300,7 +301,7 @@ static void btrfs_work_helper(struct work_struct *normal_work)
/*
* We should not touch things inside work in the following cases:
- * 1) after work->func() if it has no ordered_free
+ * 1) after work->func() if it has no ordered_func(..., true) to free
* Since the struct is freed in work->func().
* 2) after setting WORK_DONE_BIT
* The work may be freed in other threads almost instantly.
@@ -329,11 +330,10 @@ static void btrfs_work_helper(struct work_struct *normal_work)
}
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
- btrfs_func_t ordered_func, btrfs_func_t ordered_free)
+ btrfs_ordered_func_t ordered_func)
{
work->func = func;
work->ordered_func = ordered_func;
- work->ordered_free = ordered_free;
INIT_WORK(&work->normal_work, btrfs_work_helper);
INIT_LIST_HEAD(&work->ordered_list);
work->flags = 0;
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index 30f66c5e2e6e..62b8a0d57898 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -13,11 +13,11 @@ struct btrfs_fs_info;
struct btrfs_workqueue;
struct btrfs_work;
typedef void (*btrfs_func_t)(struct btrfs_work *arg);
+typedef void (*btrfs_ordered_func_t)(struct btrfs_work *arg, bool);
struct btrfs_work {
btrfs_func_t func;
- btrfs_func_t ordered_func;
- btrfs_func_t ordered_free;
+ btrfs_ordered_func_t ordered_func;
/* Don't touch things below */
struct work_struct normal_work;
@@ -35,7 +35,7 @@ struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
struct btrfs_fs_info *fs_info, const char *name,
unsigned int flags);
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
- btrfs_func_t ordered_func, btrfs_func_t ordered_free);
+ btrfs_ordered_func_t ordered_func);
void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
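The header change above collapses the old ordered_func/ordered_free pair into one callback whose bool argument selects between "run the ordered step" and "free the work item" (run_one_async_done() in bio.c, earlier in this diff, is a real user). A compilable userspace sketch of a combined callback under that calling convention (struct names invented):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdlib.h>

	struct work {
		void (*ordered_func)(struct work *, bool);
	};

	struct async_ctx {
		struct work work;
		/* ... per-request state ... */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static void example_ordered(struct work *w, bool do_free)
	{
		struct async_ctx *ctx = container_of(w, struct async_ctx, work);

		if (do_free) {		/* replaces the old ->ordered_free */
			free(ctx);
			return;
		}
		/* ... ordered completion step (the old ->ordered_func) ... */
	}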
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b7d54efb4728..beed7e459dab 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1129,6 +1129,9 @@ static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
count, sc, GFP_NOFS);
break;
}
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
+ break;
default:
WARN_ON(1);
}
@@ -2998,7 +3001,7 @@ int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
}
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_backref_cache *cache, int is_reloc)
+ struct btrfs_backref_cache *cache, bool is_reloc)
{
int i;
@@ -3196,12 +3199,14 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
* We still need to do a tree search to find out the parents. This is for
* TREE_BLOCK_REF backref (keyed or inlined).
*
+ * @trans: Transaction handle.
* @ref_key: The same as @ref_key in handle_direct_tree_backref()
* @tree_key: The first key of this tree block.
* @path: A clean (released) path, to avoid allocating a path every time
* the function gets called.
*/
-static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_key *ref_key,
struct btrfs_key *tree_key,
@@ -3315,7 +3320,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
* If we know the block isn't shared we can avoid
* checking its backrefs.
*/
- if (btrfs_block_can_be_shared(root, eb))
+ if (btrfs_block_can_be_shared(trans, root, eb))
upper->checked = 0;
else
upper->checked = 1;
@@ -3363,11 +3368,13 @@ out:
* links aren't yet bi-directional. Needs to finish such links.
* Use btrfs_backref_finish_upper_links() to finish such linkage.
*
+ * @trans: Transaction handle.
* @path: Released path for indirect tree backref lookup
* @iter: Released backref iter for extent tree search
* @node_key: The first key of the tree block
*/
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
@@ -3467,8 +3474,8 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
* offset means the root objectid. We need to search
* the tree to get its parent bytenr.
*/
- ret = handle_indirect_tree_backref(cache, path, &key, node_key,
- cur);
+ ret = handle_indirect_tree_backref(trans, cache, path,
+ &key, node_key, cur);
if (ret < 0)
goto out;
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 1616e3e3f1e4..ab4ca0eda605 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -247,7 +247,7 @@ struct prelim_ref {
struct rb_node rbnode;
u64 root_id;
struct btrfs_key key_for_search;
- int level;
+ u8 level;
int count;
struct extent_inode_elem *inode_list;
u64 parent;
@@ -440,11 +440,11 @@ struct btrfs_backref_cache {
* Relocation backref cache requires more info for reloc root compared
* to generic backref cache.
*/
- unsigned int is_reloc;
+ bool is_reloc;
};
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
- struct btrfs_backref_cache *cache, int is_reloc);
+ struct btrfs_backref_cache *cache, bool is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
@@ -533,14 +533,15 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);
static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
- u64 bytenr, int errno)
+ u64 bytenr, int error)
{
- btrfs_panic(fs_info, errno,
+ btrfs_panic(fs_info, error,
"Inconsistency in backref cache found at offset %llu",
bytenr);
}
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 12b12443efaa..4f3b693a16b1 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -10,11 +10,11 @@
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
-#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"
+#include "raid-stripe-tree.h"
static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
@@ -416,6 +416,9 @@ static void btrfs_orig_write_end_io(struct bio *bio)
else
bio->bi_status = BLK_STS_OK;
+ if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
+ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
btrfs_orig_bbio_end_io(bbio);
btrfs_put_bioc(bioc);
}
@@ -427,6 +430,8 @@ static void btrfs_clone_write_end_io(struct bio *bio)
if (bio->bi_status) {
atomic_inc(&stripe->bioc->error);
btrfs_log_dev_io_error(bio, stripe->dev);
+ } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
}
/* Pass on control to the original bio this one was cloned from */
@@ -463,8 +468,6 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
dev->devid, bio->bi_iter.bi_size);
- btrfsic_check_bio(bio);
-
if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
blkcg_punt_bio_submit(bio);
else
@@ -490,6 +493,7 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
bio->bi_private = &bioc->stripes[dev_nr];
bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
bioc->stripes[dev_nr].bioc = bioc;
+ bioc->size = bio->bi_iter.bi_size;
btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}
@@ -499,6 +503,8 @@ static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
if (!bioc) {
/* Single mirror read/write fast path. */
btrfs_bio(bio)->mirror_num = mirror_num;
+ if (bio_op(bio) != REQ_OP_READ)
+ btrfs_bio(bio)->orig_physical = smap->physical;
bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
@@ -568,13 +574,20 @@ static void run_one_async_start(struct btrfs_work *work)
*
* At IO completion time the csums attached on the ordered extent record are
* inserted into the tree.
+ *
+ * If called with @do_free == true, then it will free the work struct.
*/
-static void run_one_async_done(struct btrfs_work *work)
+static void run_one_async_done(struct btrfs_work *work, bool do_free)
{
struct async_submit_bio *async =
container_of(work, struct async_submit_bio, work);
struct bio *bio = &async->bbio->bio;
+ if (do_free) {
+ kfree(container_of(work, struct async_submit_bio, work));
+ return;
+ }
+
/* If an error occurred we just want to clean up the bio and move on. */
if (bio->bi_status) {
btrfs_orig_bbio_end_io(async->bbio);
@@ -590,11 +603,6 @@ static void run_one_async_done(struct btrfs_work *work)
__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}
-static void run_one_async_free(struct btrfs_work *work)
-{
- kfree(container_of(work, struct async_submit_bio, work));
-}
-
static bool should_async_write(struct btrfs_bio *bbio)
{
/* Submit synchronously if the checksum implementation is fast. */
@@ -636,8 +644,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
async->smap = *smap;
async->mirror_num = mirror_num;
- btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
- run_one_async_free);
+ btrfs_init_work(&async->work, run_one_async_start, run_one_async_done);
btrfs_queue_work(fs_info->workers, &async->work);
return true;
}
@@ -657,9 +664,11 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
blk_status_t ret;
int error;
+ smap.is_scrub = !bbio->inode;
+
btrfs_bio_counter_inc_blocked(fs_info);
error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
- &bioc, &smap, &mirror_num, 1);
+ &bioc, &smap, &mirror_num);
if (error) {
ret = errno_to_blk_status(error);
goto fail;
@@ -691,6 +700,18 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
bio->bi_opf |= REQ_OP_ZONE_APPEND;
}
+ if (is_data_bbio(bbio) && bioc &&
+ btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
+ /*
+ * No locking for the list update, as we only add to
+ * the list in the I/O submission path, and list
+ * iteration only happens in the completion path, which
+ * can't happen until after the last submission.
+ */
+ btrfs_get_bioc(bioc);
+ list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
+ }
+
/*
* Csum items for reloc roots have already been cloned at this
* point, so they are handled as part of the no-checksum case.
@@ -779,8 +800,6 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
__bio_add_page(&bio, page, length, pg_offset);
-
- btrfsic_check_bio(&bio);
ret = submit_bio_wait(&bio);
if (ret) {
/* try to remap that extent elsewhere? */
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index b2e5107b7cec..6e5dc68ff661 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -935,7 +935,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
caching_ctl->block_group = cache;
refcount_set(&caching_ctl->count, 2);
atomic_set(&caching_ctl->progress, 0);
- btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
+ btrfs_init_work(&caching_ctl->work, caching_thread, NULL);
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
@@ -1286,7 +1286,7 @@ out:
/* Once for the lookup reference */
btrfs_put_block_group(block_group);
if (remove_rsv)
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
btrfs_free_path(path);
return ret;
}
@@ -2601,7 +2601,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -2709,7 +2709,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
/* Already aborted the transaction if it failed. */
next:
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
list_del_init(&block_group->bg_list);
clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
}
@@ -2819,8 +2819,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
#endif
list_add_tail(&cache->bg_list, &trans->new_bgs);
- trans->delayed_ref_updates++;
- btrfs_update_delayed_refs_rsv(trans);
+ btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
set_avail_alloc_bits(fs_info, type);
return cache;
@@ -3025,7 +3024,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
cache->global_root_id);
btrfs_set_stack_block_group_flags(&bgi, cache->flags);
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
fail:
btrfs_release_path(path);
/*
@@ -3051,7 +3050,6 @@ static int cache_save_setup(struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct btrfs_root *root = fs_info->tree_root;
struct inode *inode = NULL;
struct extent_changeset *data_reserved = NULL;
u64 alloc_hint = 0;
@@ -3103,7 +3101,7 @@ again:
* time.
*/
BTRFS_I(inode)->generation = 0;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret) {
/*
* So theoretically we could recover from this, simply set the
@@ -3370,7 +3368,7 @@ again:
if (should_put)
btrfs_put_block_group(cache);
if (drop_reserve)
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
/*
* Avoid blocking other tasks for too long. It might even save
* us from writing caches for block groups that are going to be
@@ -3474,8 +3472,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
cache_save_setup(cache, trans, path);
if (!ret)
- ret = btrfs_run_delayed_refs(trans,
- (unsigned long) -1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
cache->io_ctl.inode = NULL;
@@ -3518,7 +3515,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
/* If its not on the io list, we need to put the block group */
if (should_put)
btrfs_put_block_group(cache);
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@@ -3543,12 +3540,12 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, bool alloc)
{
struct btrfs_fs_info *info = trans->fs_info;
- struct btrfs_block_group *cache = NULL;
- u64 total = num_bytes;
+ struct btrfs_space_info *space_info;
+ struct btrfs_block_group *cache;
u64 old_val;
- u64 byte_in_group;
+ bool reclaim = false;
+ bool bg_already_dirty = true;
int factor;
- int ret = 0;
/* Block accounting for super block */
spin_lock(&info->delalloc_root_lock);
@@ -3560,97 +3557,86 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
btrfs_set_super_bytes_used(info->super_copy, old_val);
spin_unlock(&info->delalloc_root_lock);
- while (total) {
- struct btrfs_space_info *space_info;
- bool reclaim = false;
-
- cache = btrfs_lookup_block_group(info, bytenr);
- if (!cache) {
- ret = -ENOENT;
- break;
- }
- space_info = cache->space_info;
- factor = btrfs_bg_type_to_factor(cache->flags);
+ cache = btrfs_lookup_block_group(info, bytenr);
+ if (!cache)
+ return -ENOENT;
- /*
- * If this block group has free space cache written out, we
- * need to make sure to load it if we are removing space. This
- * is because we need the unpinning stage to actually add the
- * space back to the block group, otherwise we will leak space.
- */
- if (!alloc && !btrfs_block_group_done(cache))
- btrfs_cache_block_group(cache, true);
+ /* An extent cannot span multiple block groups. */
+ ASSERT(bytenr + num_bytes <= cache->start + cache->length);
- byte_in_group = bytenr - cache->start;
- WARN_ON(byte_in_group > cache->length);
+ space_info = cache->space_info;
+ factor = btrfs_bg_type_to_factor(cache->flags);
- spin_lock(&space_info->lock);
- spin_lock(&cache->lock);
+ /*
+ * If this block group has free space cache written out, we need to make
+ * sure to load it if we are removing space. This is because we need
+ * the unpinning stage to actually add the space back to the block group,
+ * otherwise we will leak space.
+ */
+ if (!alloc && !btrfs_block_group_done(cache))
+ btrfs_cache_block_group(cache, true);
- if (btrfs_test_opt(info, SPACE_CACHE) &&
- cache->disk_cache_state < BTRFS_DC_CLEAR)
- cache->disk_cache_state = BTRFS_DC_CLEAR;
+ spin_lock(&space_info->lock);
+ spin_lock(&cache->lock);
- old_val = cache->used;
- num_bytes = min(total, cache->length - byte_in_group);
- if (alloc) {
- old_val += num_bytes;
- cache->used = old_val;
- cache->reserved -= num_bytes;
- space_info->bytes_reserved -= num_bytes;
- space_info->bytes_used += num_bytes;
- space_info->disk_used += num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&space_info->lock);
- } else {
- old_val -= num_bytes;
- cache->used = old_val;
- cache->pinned += num_bytes;
- btrfs_space_info_update_bytes_pinned(info, space_info,
- num_bytes);
- space_info->bytes_used -= num_bytes;
- space_info->disk_used -= num_bytes * factor;
+ if (btrfs_test_opt(info, SPACE_CACHE) &&
+ cache->disk_cache_state < BTRFS_DC_CLEAR)
+ cache->disk_cache_state = BTRFS_DC_CLEAR;
- reclaim = should_reclaim_block_group(cache, num_bytes);
+ old_val = cache->used;
+ if (alloc) {
+ old_val += num_bytes;
+ cache->used = old_val;
+ cache->reserved -= num_bytes;
+ space_info->bytes_reserved -= num_bytes;
+ space_info->bytes_used += num_bytes;
+ space_info->disk_used += num_bytes * factor;
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
+ } else {
+ old_val -= num_bytes;
+ cache->used = old_val;
+ cache->pinned += num_bytes;
+ btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
+ space_info->bytes_used -= num_bytes;
+ space_info->disk_used -= num_bytes * factor;
- spin_unlock(&cache->lock);
- spin_unlock(&space_info->lock);
+ reclaim = should_reclaim_block_group(cache, num_bytes);
- set_extent_bit(&trans->transaction->pinned_extents,
- bytenr, bytenr + num_bytes - 1,
- EXTENT_DIRTY, NULL);
- }
+ spin_unlock(&cache->lock);
+ spin_unlock(&space_info->lock);
- spin_lock(&trans->transaction->dirty_bgs_lock);
- if (list_empty(&cache->dirty_list)) {
- list_add_tail(&cache->dirty_list,
- &trans->transaction->dirty_bgs);
- trans->delayed_ref_updates++;
- btrfs_get_block_group(cache);
- }
- spin_unlock(&trans->transaction->dirty_bgs_lock);
+ set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+ bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
+ }
- /*
- * No longer have used bytes in this block group, queue it for
- * deletion. We do this after adding the block group to the
- * dirty list to avoid races between cleaner kthread and space
- * cache writeout.
- */
- if (!alloc && old_val == 0) {
- if (!btrfs_test_opt(info, DISCARD_ASYNC))
- btrfs_mark_bg_unused(cache);
- } else if (!alloc && reclaim) {
- btrfs_mark_bg_to_reclaim(cache);
- }
+ spin_lock(&trans->transaction->dirty_bgs_lock);
+ if (list_empty(&cache->dirty_list)) {
+ list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
+ bg_already_dirty = false;
+ btrfs_get_block_group(cache);
+ }
+ spin_unlock(&trans->transaction->dirty_bgs_lock);
- btrfs_put_block_group(cache);
- total -= num_bytes;
- bytenr += num_bytes;
+ /*
+ * No longer have used bytes in this block group, queue it for deletion.
+ * We do this after adding the block group to the dirty list to avoid
+ * races between cleaner kthread and space cache writeout.
+ */
+ if (!alloc && old_val == 0) {
+ if (!btrfs_test_opt(info, DISCARD_ASYNC))
+ btrfs_mark_bg_unused(cache);
+ } else if (!alloc && reclaim) {
+ btrfs_mark_bg_to_reclaim(cache);
}
+ btrfs_put_block_group(cache);
+
/* Modified block groups are accounted for in the delayed_refs_rsv. */
- btrfs_update_delayed_refs_rsv(trans);
- return ret;
+ if (!bg_already_dirty)
+ btrfs_inc_delayed_refs_rsv_bg_updates(info);
+
+ return 0;
}
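The btrfs_update_block_group() rewrite above can drop the old while (total) loop because an extent never spans block groups, so exactly one lookup/update is needed; the new ASSERT near the top of the function encodes that invariant. Stated as a standalone bounds check (illustrative only):

	#include <assert.h>
	#include <stdint.h>

	/* An extent [bytenr, bytenr + num_bytes) must lie entirely inside the
	 * block group [start, start + length) that contains its first byte. */
	static void check_extent_in_bg(uint64_t bytenr, uint64_t num_bytes,
				       uint64_t start, uint64_t length)
	{
		assert(bytenr >= start);
		assert(bytenr + num_bytes <= start + length);
	}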
/*
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 77684c5e0c8b..ceb5f586a2d5 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -221,7 +221,8 @@ int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
if (num_bytes == 0)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
+ num_bytes, flush);
if (!ret)
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);
@@ -261,7 +262,8 @@ int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
if (!ret)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
+ num_bytes, flush);
if (!ret) {
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
return 0;
@@ -279,10 +281,10 @@ u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *target = NULL;
/*
- * If we are the delayed_rsv then push to the global rsv, otherwise dump
- * into the delayed rsv if it is not full.
+ * If we are a delayed block reserve then push to the global rsv,
+ * otherwise dump into the global delayed reserve if it is not full.
*/
- if (block_rsv == delayed_rsv)
+ if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
target = global_rsv;
else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
target = delayed_rsv;
@@ -354,6 +356,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
min_items++;
}
+ if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
+ num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
+ min_items++;
+ }
+
/*
* But we also want to reserve enough space so we can do the fallback
* global reserve for an unlink, which is an additional
@@ -405,6 +412,7 @@ void btrfs_init_root_block_rsv(struct btrfs_root *root)
case BTRFS_EXTENT_TREE_OBJECTID:
case BTRFS_FREE_SPACE_TREE_OBJECTID:
case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
+ case BTRFS_RAID_STRIPE_TREE_OBJECTID:
root->block_rsv = &fs_info->delayed_refs_rsv;
break;
case BTRFS_ROOT_TREE_OBJECTID:
@@ -517,8 +525,8 @@ again:
block_rsv->type, ret);
}
try_reserve:
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
- BTRFS_RESERVE_NO_FLUSH);
+ ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
+ blocksize, BTRFS_RESERVE_NO_FLUSH);
if (!ret)
return block_rsv;
/*
@@ -539,7 +547,7 @@ try_reserve:
* one last time to force a reservation if there's enough actual space
* on disk to make the reservation.
*/
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
+ ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
BTRFS_RESERVE_FLUSH_EMERGENCY);
if (!ret)
return block_rsv;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index bda1fdbba666..5572ae52444e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -8,6 +8,8 @@
#include <linux/hash.h>
#include <linux/refcount.h>
+#include <linux/fscrypt.h>
+#include <trace/events/btrfs.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
@@ -79,11 +81,21 @@ struct btrfs_inode {
*/
struct btrfs_key location;
+ /* Cached value of inode property 'compression'. */
+ u8 prop_compress;
+
+ /*
+ * Force compression on the file using the defrag ioctl, could be
+ * different from prop_compress and takes precedence if set.
+ */
+ u8 defrag_compress;
+
/*
* Lock for counters and all fields used to determine if the inode is in
* the log or not (last_trans, last_sub_trans, last_log_commit,
- * logged_trans), to access/update new_delalloc_bytes and to update the
- * VFS' inode number of bytes used.
+ * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
+ * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
+ * update the VFS' inode number of bytes used.
*/
spinlock_t lock;
@@ -102,8 +114,18 @@ struct btrfs_inode {
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
+ /*
+ * Counters to keep track of the number of extent items we may use due
+ * to delalloc and such. outstanding_extents is the number of extent
+ * items we think we'll end up using, and reserved_extents is the number
+ * of extent items we've reserved metadata for. Protected by 'lock'.
+ */
+ unsigned outstanding_extents;
+
/* used to order data wrt metadata */
- struct btrfs_ordered_inode_tree ordered_tree;
+ spinlock_t ordered_tree_lock;
+ struct rb_root ordered_tree;
+ struct rb_node *ordered_tree_last;
/* list of all the delalloc inodes in the FS. There are times we need
* to write all the delalloc pages to disk, and this list is used
@@ -122,28 +144,31 @@ struct btrfs_inode {
u64 generation;
/*
- * transid of the trans_handle that last modified this inode
+ * ID of the transaction handle that last modified this inode.
+ * Protected by 'lock'.
*/
u64 last_trans;
/*
- * transid that last logged this inode
+ * ID of the transaction that last logged this inode.
+ * Protected by 'lock'.
*/
u64 logged_trans;
/*
- * log transid when this inode was last modified
+ * Log transaction ID when this inode was last modified.
+ * Protected by 'lock'.
*/
int last_sub_trans;
- /* a local copy of root's last_log_commit */
+ /* A local copy of root's last_log_commit. Protected by 'lock'. */
int last_log_commit;
union {
/*
* Total number of bytes pending delalloc, used by stat to
* calculate the real block usage of the file. This is used
- * only for files.
+ * only for files. Protected by 'lock'.
*/
u64 delalloc_bytes;
/*
@@ -161,7 +186,7 @@ struct btrfs_inode {
* Total number of bytes pending delalloc that fall within a file
* range that is either a hole or beyond EOF (and no prealloc extent
* exists in the range). This is always <= delalloc_bytes and this
- * is used only for files.
+ * is used only for files. Protected by 'lock'.
*/
u64 new_delalloc_bytes;
/*
@@ -172,15 +197,15 @@ struct btrfs_inode {
};
/*
- * total number of bytes pending defrag, used by stat to check whether
- * it needs COW.
+ * Total number of bytes pending defrag, used by stat to check whether
+ * it needs COW. Protected by 'lock'.
*/
u64 defrag_bytes;
/*
- * the size of the file stored in the metadata on disk. data=ordered
+ * The size of the file stored in the metadata on disk. data=ordered
* means the in-memory i_size might be larger than the size on disk
- * because not all the blocks are written yet.
+ * because not all the blocks are written yet. Protected by 'lock'.
*/
u64 disk_i_size;
@@ -214,7 +239,7 @@ struct btrfs_inode {
/*
* Number of bytes outstanding that are going to need csums. This is
- * used in ENOSPC accounting.
+ * used in ENOSPC accounting. Protected by 'lock'.
*/
u64 csum_bytes;
@@ -223,30 +248,13 @@ struct btrfs_inode {
/* Read-only compatibility flags, upper half of inode_item::flags */
u32 ro_flags;
- /*
- * Counters to keep track of the number of extent item's we may use due
- * to delalloc and such. outstanding_extents is the number of extent
- * items we think we'll end up using, and reserved_extents is the number
- * of extent items we've reserved metadata for.
- */
- unsigned outstanding_extents;
-
struct btrfs_block_rsv block_rsv;
- /*
- * Cached values of inode properties
- */
- unsigned prop_compress; /* per-file compression algorithm */
- /*
- * Force compression on the file using the defrag ioctl, could be
- * different from prop_compress and takes precedence if set
- */
- unsigned defrag_compress;
-
struct btrfs_delayed_node *delayed_node;
/* File creation time. */
- struct timespec64 i_otime;
+ u64 i_otime_sec;
+ u32 i_otime_nsec;
/* Hook into fs_info->delayed_iputs */
struct list_head delayed_iput;
@@ -387,7 +395,7 @@ static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
spin_lock(&inode->lock);
if (inode->logged_trans == generation &&
inode->last_sub_trans <= inode->last_log_commit &&
- inode->last_sub_trans <= inode->root->last_log_commit)
+ inode->last_sub_trans <= btrfs_get_root_last_log_commit(inode->root))
ret = true;
spin_unlock(&inode->lock);
return ret;
@@ -481,9 +489,9 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 end);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode);
+ struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode);
+ struct btrfs_inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
deleted file mode 100644
index 3caf339c4bb3..000000000000
--- a/fs/btrfs/check-integrity.c
+++ /dev/null
@@ -1,2871 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) STRATO AG 2011. All rights reserved.
- */
-
-/*
- * This module can be used to catch cases when the btrfs kernel
- * code executes write requests to the disk that bring the file
- * system in an inconsistent state. In such a state, a power-loss
- * or kernel panic event would cause that the data on disk is
- * lost or at least damaged.
- *
- * Code is added that examines all block write requests during
- * runtime (including writes of the super block). Three rules
- * are verified and an error is printed on violation of the
- * rules:
- * 1. It is not allowed to write a disk block which is
- * currently referenced by the super block (either directly
- * or indirectly).
- * 2. When a super block is written, it is verified that all
- * referenced (directly or indirectly) blocks fulfill the
- * following requirements:
- * 2a. All referenced blocks have either been present when
- * the file system was mounted, (i.e., they have been
- * referenced by the super block) or they have been
- * written since then and the write completion callback
- * was called and no write error was indicated and a
- * FLUSH request to the device where these blocks are
- * located was received and completed.
- * 2b. All referenced blocks need to have a generation
- * number which is equal to the parent's number.
- *
- * One issue that was found using this module was that the log
- * tree on disk became temporarily corrupted because disk blocks
- * that had been in use for the log tree had been freed and
- * reused too early, while being referenced by the written super
- * block.
- *
- * The search term in the kernel log that can be used to filter
- * on the existence of detected integrity issues is
- * "btrfs: attempt".
- *
- * The integrity check is enabled via mount options. These
- * mount options are only supported if the integrity check
- * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
- *
- * Example #1, apply integrity checks to all metadata:
- * mount /dev/sdb1 /mnt -o check_int
- *
- * Example #2, apply integrity checks to all metadata and
- * to data extents:
- * mount /dev/sdb1 /mnt -o check_int_data
- *
- * Example #3, apply integrity checks to all metadata and dump
- * the tree that the super block references to kernel messages
- * each time after a super block was written:
- * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
- *
- * If the integrity check tool is included and activated in
- * the mount options, plenty of kernel memory is used, and
- * plenty of additional CPU cycles are spent. Enabling this
- * functionality is not intended for normal use. In most
- * cases, unless you are a btrfs developer who needs to verify
- * the integrity of (super)-block write requests, do not
- * enable the config option BTRFS_FS_CHECK_INTEGRITY to
- * include and compile the integrity check tool.
- *
- * Expect millions of lines of information in the kernel log with an
- * enabled check_int_print_mask. Therefore set LOG_BUF_SHIFT in the
- * kernel config to at least 26 (which is 64MB). Usually the value is
- * limited to 21 (which is 2MB) in init/Kconfig. The file needs to be
- * changed like this before LOG_BUF_SHIFT can be set to a high value:
- * config LOG_BUF_SHIFT
- * int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
- * range 12 30
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/blkdev.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <crypto/hash.h>
-#include "messages.h"
-#include "ctree.h"
-#include "disk-io.h"
-#include "transaction.h"
-#include "extent_io.h"
-#include "volumes.h"
-#include "print-tree.h"
-#include "locking.h"
-#include "check-integrity.h"
-#include "rcu-string.h"
-#include "compression.h"
-#include "accessors.h"
-
-#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
-#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
-#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
-#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
-#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
-#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
-#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
-#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters,
- * excluding " [...]" */
-#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
-
-/*
- * The definition of the bitmask fields for the print_mask.
- * They are specified with the mount option check_integrity_print_mask.
- */
-#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001
-#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002
-#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004
-#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008
-#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010
-#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020
-#define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040
-#define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080
-#define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100
-#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200
-#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400
-#define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800
-#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000
-#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE 0x00002000
-
-struct btrfsic_dev_state;
-struct btrfsic_state;
-
-struct btrfsic_block {
- u32 magic_num; /* only used for debug purposes */
- unsigned int is_metadata:1; /* if it is meta-data, not data-data */
- unsigned int is_superblock:1; /* if it is one of the superblocks */
- unsigned int is_iodone:1; /* if is done by lower subsystem */
- unsigned int iodone_w_error:1; /* error was indicated to endio */
- unsigned int never_written:1; /* block was added because it was
- * referenced, not because it was
- * written */
- unsigned int mirror_num; /* large enough to hold
- * BTRFS_SUPER_MIRROR_MAX */
- struct btrfsic_dev_state *dev_state;
- u64 dev_bytenr; /* key, physical byte num on disk */
- u64 logical_bytenr; /* logical byte num on disk */
- u64 generation;
- struct btrfs_disk_key disk_key; /* extra info to print in case of
- * issues, will not always be correct */
- struct list_head collision_resolving_node; /* list node */
- struct list_head all_blocks_node; /* list node */
-
- /* the following two lists contain block_link items */
- struct list_head ref_to_list; /* list */
- struct list_head ref_from_list; /* list */
- struct btrfsic_block *next_in_same_bio;
- void *orig_bio_private;
- bio_end_io_t *orig_bio_end_io;
- blk_opf_t submit_bio_bh_rw;
- u64 flush_gen; /* only valid if !never_written */
-};
-
-/*
- * Elements of this type are allocated dynamically and required because
- * each block object can refer to and can be ref from multiple blocks.
- * The key to lookup them in the hashtable is the dev_bytenr of
- * the block ref to plus the one from the block referred from.
- * The fact that they are searchable via a hashtable and that a
- * ref_cnt is maintained is not required for the btrfs integrity
- * check algorithm itself, it is only used to make the output more
- * beautiful in case that an error is detected (an error is defined
- * as a write operation to a block while that block is still referenced).
- */
-struct btrfsic_block_link {
- u32 magic_num; /* only used for debug purposes */
- u32 ref_cnt;
- struct list_head node_ref_to; /* list node */
- struct list_head node_ref_from; /* list node */
- struct list_head collision_resolving_node; /* list node */
- struct btrfsic_block *block_ref_to;
- struct btrfsic_block *block_ref_from;
- u64 parent_generation;
-};
-
-struct btrfsic_dev_state {
- u32 magic_num; /* only used for debug purposes */
- struct block_device *bdev;
- struct btrfsic_state *state;
- struct list_head collision_resolving_node; /* list node */
- struct btrfsic_block dummy_block_for_bio_bh_flush;
- u64 last_flush_gen;
-};
-
-struct btrfsic_block_hashtable {
- struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
-};
-
-struct btrfsic_block_link_hashtable {
- struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
-};
-
-struct btrfsic_dev_state_hashtable {
- struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
-};
-
-struct btrfsic_block_data_ctx {
- u64 start; /* virtual bytenr */
- u64 dev_bytenr; /* physical bytenr on device */
- u32 len;
- struct btrfsic_dev_state *dev;
- char **datav;
- struct page **pagev;
- void *mem_to_free;
-};
-
-/* This structure is used to implement recursion without occupying
- * any stack space, refer to btrfsic_process_metablock() */
-struct btrfsic_stack_frame {
- u32 magic;
- u32 nr;
- int error;
- int i;
- int limit_nesting;
- int num_copies;
- int mirror_num;
- struct btrfsic_block *block;
- struct btrfsic_block_data_ctx *block_ctx;
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx next_block_ctx;
- struct btrfs_header *hdr;
- struct btrfsic_stack_frame *prev;
-};
-
-/* Some state per mounted filesystem */
-struct btrfsic_state {
- u32 print_mask;
- int include_extent_data;
- struct list_head all_blocks_list;
- struct btrfsic_block_hashtable block_hashtable;
- struct btrfsic_block_link_hashtable block_link_hashtable;
- struct btrfs_fs_info *fs_info;
- u64 max_superblock_generation;
- struct btrfsic_block *latest_superblock;
- u32 metablock_size;
- u32 datablock_size;
-};
-
-static int btrfsic_process_metablock(struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- int limit_nesting, int force_iodone_flag);
-static void btrfsic_read_from_block_data(
- struct btrfsic_block_data_ctx *block_ctx,
- void *dst, u32 offset, size_t len);
-static int btrfsic_create_link_to_next_block(
- struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx
- *block_ctx, u64 next_bytenr,
- int limit_nesting,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block **next_blockp,
- int force_iodone_flag,
- int *num_copiesp, int *mirror_nump,
- struct btrfs_disk_key *disk_key,
- u64 parent_generation);
-static int btrfsic_handle_extent_data(struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- u32 item_offset, int force_iodone_flag);
-static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
- struct btrfsic_block_data_ctx *block_ctx_out,
- int mirror_num);
-static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
-static int btrfsic_read_block(struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx);
-static int btrfsic_process_written_superblock(
- struct btrfsic_state *state,
- struct btrfsic_block *const block,
- struct btrfs_super_block *const super_hdr);
-static void btrfsic_bio_end_io(struct bio *bp);
-static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int recursion_level);
-static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
- struct btrfsic_block *const block,
- int recursion_level);
-static void btrfsic_print_add_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l);
-static void btrfsic_print_rem_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l);
-static char btrfsic_get_block_type(const struct btrfsic_state *state,
- const struct btrfsic_block *block);
-static void btrfsic_dump_tree(const struct btrfsic_state *state);
-static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int indent_level);
-static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block *next_block,
- struct btrfsic_block *from_block,
- u64 parent_generation);
-static struct btrfsic_block *btrfsic_block_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx,
- const char *additional_string,
- int is_metadata,
- int is_iodone,
- int never_written,
- int mirror_num,
- int *was_created);
-static int btrfsic_process_superblock_dev_mirror(
- struct btrfsic_state *state,
- struct btrfsic_dev_state *dev_state,
- struct btrfs_device *device,
- int superblock_mirror_num,
- struct btrfsic_dev_state **selected_dev_state,
- struct btrfs_super_block *selected_super);
-static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev);
-static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
- u64 bytenr,
- struct btrfsic_dev_state *dev_state,
- u64 dev_bytenr);
-
-static struct mutex btrfsic_mutex;
-static int btrfsic_is_initialized;
-static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;
-
-
-static void btrfsic_block_init(struct btrfsic_block *b)
-{
- b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
- b->dev_state = NULL;
- b->dev_bytenr = 0;
- b->logical_bytenr = 0;
- b->generation = BTRFSIC_GENERATION_UNKNOWN;
- b->disk_key.objectid = 0;
- b->disk_key.type = 0;
- b->disk_key.offset = 0;
- b->is_metadata = 0;
- b->is_superblock = 0;
- b->is_iodone = 0;
- b->iodone_w_error = 0;
- b->never_written = 0;
- b->mirror_num = 0;
- b->next_in_same_bio = NULL;
- b->orig_bio_private = NULL;
- b->orig_bio_end_io = NULL;
- INIT_LIST_HEAD(&b->collision_resolving_node);
- INIT_LIST_HEAD(&b->all_blocks_node);
- INIT_LIST_HEAD(&b->ref_to_list);
- INIT_LIST_HEAD(&b->ref_from_list);
- b->submit_bio_bh_rw = 0;
- b->flush_gen = 0;
-}
-
-static struct btrfsic_block *btrfsic_block_alloc(void)
-{
- struct btrfsic_block *b;
-
- b = kzalloc(sizeof(*b), GFP_NOFS);
- if (NULL != b)
- btrfsic_block_init(b);
-
- return b;
-}
-
-static void btrfsic_block_free(struct btrfsic_block *b)
-{
- BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
- kfree(b);
-}
-
-static void btrfsic_block_link_init(struct btrfsic_block_link *l)
-{
- l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
- l->ref_cnt = 1;
- INIT_LIST_HEAD(&l->node_ref_to);
- INIT_LIST_HEAD(&l->node_ref_from);
- INIT_LIST_HEAD(&l->collision_resolving_node);
- l->block_ref_to = NULL;
- l->block_ref_from = NULL;
-}
-
-static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
-{
- struct btrfsic_block_link *l;
-
- l = kzalloc(sizeof(*l), GFP_NOFS);
- if (NULL != l)
- btrfsic_block_link_init(l);
-
- return l;
-}
-
-static void btrfsic_block_link_free(struct btrfsic_block_link *l)
-{
- BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
- kfree(l);
-}
-
-static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
-{
- ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
- ds->bdev = NULL;
- ds->state = NULL;
- INIT_LIST_HEAD(&ds->collision_resolving_node);
- ds->last_flush_gen = 0;
- btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
- ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
- ds->dummy_block_for_bio_bh_flush.dev_state = ds;
-}
-
-static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
-{
- struct btrfsic_dev_state *ds;
-
- ds = kzalloc(sizeof(*ds), GFP_NOFS);
- if (NULL != ds)
- btrfsic_dev_state_init(ds);
-
- return ds;
-}
-
-static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
-{
- BUG_ON(!(NULL == ds ||
- BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
- kfree(ds);
-}
-
-static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
-{
- int i;
-
- for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
- INIT_LIST_HEAD(h->table + i);
-}
-
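-/*
- * Blocks are hashed by XOR-ing the upper bits of the physical byte number
- * with the block_device pointer, masked to the hash table size.
- */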
-static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
- struct btrfsic_block_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(b->dev_bytenr >> 16)) ^
- ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
- (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
-
- list_add(&b->collision_resolving_node, h->table + hashval);
-}
-
-static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
-{
- list_del(&b->collision_resolving_node);
-}
-
-static struct btrfsic_block *btrfsic_block_hashtable_lookup(
- struct block_device *bdev,
- u64 dev_bytenr,
- struct btrfsic_block_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(dev_bytenr >> 16)) ^
- ((unsigned int)((uintptr_t)bdev))) &
- (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
- struct btrfsic_block *b;
-
- list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
- if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
- return b;
- }
-
- return NULL;
-}
-
-static void btrfsic_block_link_hashtable_init(
- struct btrfsic_block_link_hashtable *h)
-{
- int i;
-
- for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
- INIT_LIST_HEAD(h->table + i);
-}
-
-static void btrfsic_block_link_hashtable_add(
- struct btrfsic_block_link *l,
- struct btrfsic_block_link_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
- ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
- ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
- ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
- & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
-
- BUG_ON(NULL == l->block_ref_to);
- BUG_ON(NULL == l->block_ref_from);
- list_add(&l->collision_resolving_node, h->table + hashval);
-}
-
-static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
-{
- list_del(&l->collision_resolving_node);
-}
-
-static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
- struct block_device *bdev_ref_to,
- u64 dev_bytenr_ref_to,
- struct block_device *bdev_ref_from,
- u64 dev_bytenr_ref_from,
- struct btrfsic_block_link_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
- ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
- ((unsigned int)((uintptr_t)bdev_ref_to)) ^
- ((unsigned int)((uintptr_t)bdev_ref_from))) &
- (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
- struct btrfsic_block_link *l;
-
- list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
- BUG_ON(NULL == l->block_ref_to);
- BUG_ON(NULL == l->block_ref_from);
- if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
- l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
- l->block_ref_from->dev_state->bdev == bdev_ref_from &&
- l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
- return l;
- }
-
- return NULL;
-}
-
-static void btrfsic_dev_state_hashtable_init(
- struct btrfsic_dev_state_hashtable *h)
-{
- int i;
-
- for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
- INIT_LIST_HEAD(h->table + i);
-}
-
-static void btrfsic_dev_state_hashtable_add(
- struct btrfsic_dev_state *ds,
- struct btrfsic_dev_state_hashtable *h)
-{
- const unsigned int hashval =
- (((unsigned int)((uintptr_t)ds->bdev->bd_dev)) &
- (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
-
- list_add(&ds->collision_resolving_node, h->table + hashval);
-}
-
-static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
-{
- list_del(&ds->collision_resolving_node);
-}
-
-static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(dev_t dev,
- struct btrfsic_dev_state_hashtable *h)
-{
- const unsigned int hashval =
- dev & (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1);
- struct btrfsic_dev_state *ds;
-
- list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
- if (ds->bdev->bd_dev == dev)
- return ds;
- }
-
- return NULL;
-}
-
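-/*
- * Read the superblock from all devices and mirrors via
- * btrfsic_process_superblock_dev_mirror(), then descend from the copy
- * with the highest generation into the root, chunk and log trees on all
- * mirrors.
- */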
-static int btrfsic_process_superblock(struct btrfsic_state *state,
- struct btrfs_fs_devices *fs_devices)
-{
- struct btrfs_super_block *selected_super;
- struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_device *device;
- struct btrfsic_dev_state *selected_dev_state = NULL;
- int ret = 0;
- int pass;
-
- selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
- if (!selected_super)
- return -ENOMEM;
-
- list_for_each_entry(device, dev_head, dev_list) {
- int i;
- struct btrfsic_dev_state *dev_state;
-
- if (!device->bdev || !device->name)
- continue;
-
- dev_state = btrfsic_dev_state_lookup(device->bdev->bd_dev);
- BUG_ON(NULL == dev_state);
- for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- ret = btrfsic_process_superblock_dev_mirror(
- state, dev_state, device, i,
- &selected_dev_state, selected_super);
- if (0 != ret && 0 == i) {
- kfree(selected_super);
- return ret;
- }
- }
- }
-
- if (NULL == state->latest_superblock) {
- pr_info("btrfsic: no superblock found!\n");
- kfree(selected_super);
- return -1;
- }
-
- for (pass = 0; pass < 3; pass++) {
- int num_copies;
- int mirror_num;
- u64 next_bytenr;
-
- switch (pass) {
- case 0:
- next_bytenr = btrfs_super_root(selected_super);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("root@%llu\n", next_bytenr);
- break;
- case 1:
- next_bytenr = btrfs_super_chunk_root(selected_super);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("chunk@%llu\n", next_bytenr);
- break;
- case 2:
- next_bytenr = btrfs_super_log_root(selected_super);
- if (0 == next_bytenr)
- continue;
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("log@%llu\n", next_bytenr);
- break;
- }
-
- num_copies = btrfs_num_copies(state->fs_info, next_bytenr,
- state->metablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
-
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx tmp_next_block_ctx;
- struct btrfsic_block_link *l;
-
- ret = btrfsic_map_block(state, next_bytenr,
- state->metablock_size,
- &tmp_next_block_ctx,
- mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(root @%llu, mirror %d) failed!\n",
- next_bytenr, mirror_num);
- kfree(selected_super);
- return -1;
- }
-
- next_block = btrfsic_block_hashtable_lookup(
- tmp_next_block_ctx.dev->bdev,
- tmp_next_block_ctx.dev_bytenr,
- &state->block_hashtable);
- BUG_ON(NULL == next_block);
-
- l = btrfsic_block_link_hashtable_lookup(
- tmp_next_block_ctx.dev->bdev,
- tmp_next_block_ctx.dev_bytenr,
- state->latest_superblock->dev_state->
- bdev,
- state->latest_superblock->dev_bytenr,
- &state->block_link_hashtable);
- BUG_ON(NULL == l);
-
- ret = btrfsic_read_block(state, &tmp_next_block_ctx);
- if (ret < (int)PAGE_SIZE) {
- pr_info("btrfsic: read @logical %llu failed!\n",
- tmp_next_block_ctx.start);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- kfree(selected_super);
- return -1;
- }
-
- ret = btrfsic_process_metablock(state,
- next_block,
- &tmp_next_block_ctx,
- BTRFS_MAX_LEVEL + 3, 1);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- }
- }
-
- kfree(selected_super);
- return ret;
-}
-
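-/*
- * Read one superblock mirror from one device, register it in the block
- * hashtable and link it to the root, chunk and log tree blocks that it
- * references. The copy with the highest generation seen so far is
- * remembered in *selected_dev_state and selected_super.
- */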
-static int btrfsic_process_superblock_dev_mirror(
- struct btrfsic_state *state,
- struct btrfsic_dev_state *dev_state,
- struct btrfs_device *device,
- int superblock_mirror_num,
- struct btrfsic_dev_state **selected_dev_state,
- struct btrfs_super_block *selected_super)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfs_super_block *super_tmp;
- u64 dev_bytenr;
- struct btrfsic_block *superblock_tmp;
- int pass;
- struct block_device *const superblock_bdev = device->bdev;
- struct page *page;
- struct address_space *mapping = superblock_bdev->bd_inode->i_mapping;
- int ret = 0;
-
- /* super block bytenr is always the unmapped device bytenr */
- dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
- if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->commit_total_bytes)
- return -1;
-
- page = read_cache_page_gfp(mapping, dev_bytenr >> PAGE_SHIFT, GFP_NOFS);
- if (IS_ERR(page))
- return -1;
-
- super_tmp = page_address(page);
-
- if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
- btrfs_super_magic(super_tmp) != BTRFS_MAGIC ||
- memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
- btrfs_super_nodesize(super_tmp) != state->metablock_size ||
- btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
- ret = 0;
- goto out;
- }
-
- superblock_tmp =
- btrfsic_block_hashtable_lookup(superblock_bdev,
- dev_bytenr,
- &state->block_hashtable);
- if (NULL == superblock_tmp) {
- superblock_tmp = btrfsic_block_alloc();
- if (NULL == superblock_tmp) {
- ret = -1;
- goto out;
- }
- /* for superblock, only the dev_bytenr makes sense */
- superblock_tmp->dev_bytenr = dev_bytenr;
- superblock_tmp->dev_state = dev_state;
- superblock_tmp->logical_bytenr = dev_bytenr;
- superblock_tmp->generation = btrfs_super_generation(super_tmp);
- superblock_tmp->is_metadata = 1;
- superblock_tmp->is_superblock = 1;
- superblock_tmp->is_iodone = 1;
- superblock_tmp->never_written = 0;
- superblock_tmp->mirror_num = 1 + superblock_mirror_num;
- if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- btrfs_info_in_rcu(fs_info,
- "new initial S-block (bdev %p, %s) @%llu (%pg/%llu/%d)",
- superblock_bdev,
- btrfs_dev_name(device), dev_bytenr,
- dev_state->bdev, dev_bytenr,
- superblock_mirror_num);
- list_add(&superblock_tmp->all_blocks_node,
- &state->all_blocks_list);
- btrfsic_block_hashtable_add(superblock_tmp,
- &state->block_hashtable);
- }
-
- /* select the one with the highest generation field */
- if (btrfs_super_generation(super_tmp) >
- state->max_superblock_generation ||
- 0 == state->max_superblock_generation) {
- memcpy(selected_super, super_tmp, sizeof(*selected_super));
- *selected_dev_state = dev_state;
- state->max_superblock_generation =
- btrfs_super_generation(super_tmp);
- state->latest_superblock = superblock_tmp;
- }
-
- for (pass = 0; pass < 3; pass++) {
- u64 next_bytenr;
- int num_copies;
- int mirror_num;
- const char *additional_string = NULL;
- struct btrfs_disk_key tmp_disk_key;
-
- tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
- tmp_disk_key.offset = 0;
- switch (pass) {
- case 0:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_ROOT_TREE_OBJECTID);
- additional_string = "initial root ";
- next_bytenr = btrfs_super_root(super_tmp);
- break;
- case 1:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_CHUNK_TREE_OBJECTID);
- additional_string = "initial chunk ";
- next_bytenr = btrfs_super_chunk_root(super_tmp);
- break;
- case 2:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_TREE_LOG_OBJECTID);
- additional_string = "initial log ";
- next_bytenr = btrfs_super_log_root(super_tmp);
- if (0 == next_bytenr)
- continue;
- break;
- }
-
- num_copies = btrfs_num_copies(fs_info, next_bytenr,
- state->metablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx tmp_next_block_ctx;
- struct btrfsic_block_link *l;
-
- if (btrfsic_map_block(state, next_bytenr,
- state->metablock_size,
- &tmp_next_block_ctx,
- mirror_num)) {
- pr_info("btrfsic: btrfsic_map_block(bytenr @%llu, mirror %d) failed!\n",
- next_bytenr, mirror_num);
- ret = -1;
- goto out;
- }
-
- next_block = btrfsic_block_lookup_or_add(
- state, &tmp_next_block_ctx,
- additional_string, 1, 1, 0,
- mirror_num, NULL);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- ret = -1;
- goto out;
- }
-
- next_block->disk_key = tmp_disk_key;
- next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
- l = btrfsic_block_link_lookup_or_add(
- state, &tmp_next_block_ctx,
- next_block, superblock_tmp,
- BTRFSIC_GENERATION_UNKNOWN);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- if (NULL == l) {
- ret = -1;
- goto out;
- }
- }
- }
- if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
- btrfsic_dump_tree_sub(state, superblock_tmp, 0);
-
-out:
- put_page(page);
- return ret;
-}
-
-static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
-{
- struct btrfsic_stack_frame *sf;
-
- sf = kzalloc(sizeof(*sf), GFP_NOFS);
- if (sf)
- sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
- return sf;
-}
-
-static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
-{
- BUG_ON(!(NULL == sf ||
- BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
- kfree(sf);
-}
-
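-/*
- * Walk a tree block and everything it references. The traversal is
- * implemented iteratively with heap-allocated stack frames instead of
- * recursion in order to keep kernel stack usage low.
- */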
-static noinline_for_stack int btrfsic_process_metablock(
- struct btrfsic_state *state,
- struct btrfsic_block *const first_block,
- struct btrfsic_block_data_ctx *const first_block_ctx,
- int first_limit_nesting, int force_iodone_flag)
-{
- struct btrfsic_stack_frame initial_stack_frame = { 0 };
- struct btrfsic_stack_frame *sf;
- struct btrfsic_stack_frame *next_stack;
- struct btrfs_header *const first_hdr =
- (struct btrfs_header *)first_block_ctx->datav[0];
-
- BUG_ON(!first_hdr);
- sf = &initial_stack_frame;
- sf->error = 0;
- sf->i = -1;
- sf->limit_nesting = first_limit_nesting;
- sf->block = first_block;
- sf->block_ctx = first_block_ctx;
- sf->next_block = NULL;
- sf->hdr = first_hdr;
- sf->prev = NULL;
-
-continue_with_new_stack_frame:
- sf->block->generation = btrfs_stack_header_generation(sf->hdr);
- if (0 == sf->hdr->level) {
- struct btrfs_leaf *const leafhdr =
- (struct btrfs_leaf *)sf->hdr;
-
- if (-1 == sf->i) {
- sf->nr = btrfs_stack_header_nritems(&leafhdr->header);
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("leaf %llu items %d generation %llu owner %llu\n",
- sf->block_ctx->start, sf->nr,
- btrfs_stack_header_generation(
- &leafhdr->header),
- btrfs_stack_header_owner(
- &leafhdr->header));
- }
-
-continue_with_current_leaf_stack_frame:
- if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
- sf->i++;
- sf->num_copies = 0;
- }
-
- if (sf->i < sf->nr) {
- struct btrfs_item disk_item;
- u32 disk_item_offset =
- (uintptr_t)(leafhdr->items + sf->i) -
- (uintptr_t)leafhdr;
- struct btrfs_disk_key *disk_key;
- u8 type;
- u32 item_offset;
- u32 item_size;
-
- if (disk_item_offset + sizeof(struct btrfs_item) >
- sf->block_ctx->len) {
-leaf_item_out_of_bounds_error:
- pr_info(
- "btrfsic: leaf item out of bounds at logical %llu, dev %pg\n",
- sf->block_ctx->start,
- sf->block_ctx->dev->bdev);
- goto one_stack_frame_backwards;
- }
- btrfsic_read_from_block_data(sf->block_ctx,
- &disk_item,
- disk_item_offset,
- sizeof(struct btrfs_item));
- item_offset = btrfs_stack_item_offset(&disk_item);
- item_size = btrfs_stack_item_size(&disk_item);
- disk_key = &disk_item.key;
- type = btrfs_disk_key_type(disk_key);
-
- if (BTRFS_ROOT_ITEM_KEY == type) {
- struct btrfs_root_item root_item;
- u32 root_item_offset;
- u64 next_bytenr;
-
- root_item_offset = item_offset +
- offsetof(struct btrfs_leaf, items);
- if (root_item_offset + item_size >
- sf->block_ctx->len)
- goto leaf_item_out_of_bounds_error;
- btrfsic_read_from_block_data(
- sf->block_ctx, &root_item,
- root_item_offset,
- item_size);
- next_bytenr = btrfs_root_bytenr(&root_item);
-
- sf->error =
- btrfsic_create_link_to_next_block(
- state,
- sf->block,
- sf->block_ctx,
- next_bytenr,
- sf->limit_nesting,
- &sf->next_block_ctx,
- &sf->next_block,
- force_iodone_flag,
- &sf->num_copies,
- &sf->mirror_num,
- disk_key,
- btrfs_root_generation(
- &root_item));
- if (sf->error)
- goto one_stack_frame_backwards;
-
- if (NULL != sf->next_block) {
- struct btrfs_header *const next_hdr =
- (struct btrfs_header *)
- sf->next_block_ctx.datav[0];
-
- next_stack =
- btrfsic_stack_frame_alloc();
- if (NULL == next_stack) {
- sf->error = -1;
- btrfsic_release_block_ctx(
- &sf->
- next_block_ctx);
- goto one_stack_frame_backwards;
- }
-
- next_stack->i = -1;
- next_stack->block = sf->next_block;
- next_stack->block_ctx =
- &sf->next_block_ctx;
- next_stack->next_block = NULL;
- next_stack->hdr = next_hdr;
- next_stack->limit_nesting =
- sf->limit_nesting - 1;
- next_stack->prev = sf;
- sf = next_stack;
- goto continue_with_new_stack_frame;
- }
- } else if (BTRFS_EXTENT_DATA_KEY == type &&
- state->include_extent_data) {
- sf->error = btrfsic_handle_extent_data(
- state,
- sf->block,
- sf->block_ctx,
- item_offset,
- force_iodone_flag);
- if (sf->error)
- goto one_stack_frame_backwards;
- }
-
- goto continue_with_current_leaf_stack_frame;
- }
- } else {
- struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;
-
- if (-1 == sf->i) {
- sf->nr = btrfs_stack_header_nritems(&nodehdr->header);
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("node %llu level %d items %d generation %llu owner %llu\n",
- sf->block_ctx->start,
- nodehdr->header.level, sf->nr,
- btrfs_stack_header_generation(
- &nodehdr->header),
- btrfs_stack_header_owner(
- &nodehdr->header));
- }
-
-continue_with_current_node_stack_frame:
- if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
- sf->i++;
- sf->num_copies = 0;
- }
-
- if (sf->i < sf->nr) {
- struct btrfs_key_ptr key_ptr;
- u32 key_ptr_offset;
- u64 next_bytenr;
-
- key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
- (uintptr_t)nodehdr;
- if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
- sf->block_ctx->len) {
- pr_info(
- "btrfsic: node item out of bounce at logical %llu, dev %pg\n",
- sf->block_ctx->start,
- sf->block_ctx->dev->bdev);
- goto one_stack_frame_backwards;
- }
- btrfsic_read_from_block_data(
- sf->block_ctx, &key_ptr, key_ptr_offset,
- sizeof(struct btrfs_key_ptr));
- next_bytenr = btrfs_stack_key_blockptr(&key_ptr);
-
- sf->error = btrfsic_create_link_to_next_block(
- state,
- sf->block,
- sf->block_ctx,
- next_bytenr,
- sf->limit_nesting,
- &sf->next_block_ctx,
- &sf->next_block,
- force_iodone_flag,
- &sf->num_copies,
- &sf->mirror_num,
- &key_ptr.key,
- btrfs_stack_key_generation(&key_ptr));
- if (sf->error)
- goto one_stack_frame_backwards;
-
- if (NULL != sf->next_block) {
- struct btrfs_header *const next_hdr =
- (struct btrfs_header *)
- sf->next_block_ctx.datav[0];
-
- next_stack = btrfsic_stack_frame_alloc();
- if (NULL == next_stack) {
- sf->error = -1;
- goto one_stack_frame_backwards;
- }
-
- next_stack->i = -1;
- next_stack->block = sf->next_block;
- next_stack->block_ctx = &sf->next_block_ctx;
- next_stack->next_block = NULL;
- next_stack->hdr = next_hdr;
- next_stack->limit_nesting =
- sf->limit_nesting - 1;
- next_stack->prev = sf;
- sf = next_stack;
- goto continue_with_new_stack_frame;
- }
-
- goto continue_with_current_node_stack_frame;
- }
- }
-
-one_stack_frame_backwards:
- if (NULL != sf->prev) {
- struct btrfsic_stack_frame *const prev = sf->prev;
-
- /* the one for the initial block is freed in the caller */
- btrfsic_release_block_ctx(sf->block_ctx);
-
- if (sf->error) {
- prev->error = sf->error;
- btrfsic_stack_frame_free(sf);
- sf = prev;
- goto one_stack_frame_backwards;
- }
-
- btrfsic_stack_frame_free(sf);
- sf = prev;
- goto continue_with_new_stack_frame;
- } else {
- BUG_ON(&initial_stack_frame != sf);
- }
-
- return sf->error;
-}
-
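-/*
- * Copy a byte range out of the page vector of a block context into a
- * contiguous destination buffer, crossing page boundaries as needed.
- */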
-static void btrfsic_read_from_block_data(
- struct btrfsic_block_data_ctx *block_ctx,
- void *dstv, u32 offset, size_t len)
-{
- size_t cur;
- size_t pgoff;
- char *kaddr;
- char *dst = (char *)dstv;
- size_t start_offset = offset_in_page(block_ctx->start);
- unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
-
- WARN_ON(offset + len > block_ctx->len);
- pgoff = offset_in_page(start_offset + offset);
-
- while (len > 0) {
- cur = min(len, ((size_t)PAGE_SIZE - pgoff));
- BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
- kaddr = block_ctx->datav[i];
- memcpy(dst, kaddr + pgoff, cur);
-
- dst += cur;
- len -= cur;
- pgoff = 0;
- i++;
- }
-}
-
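-/*
- * Look up or create the referenced block and the link from the current
- * block to it. On the first visit of a block (when the link is newly
- * allocated and the nesting limit is not exhausted), the block is read
- * from disk and returned in *next_blockp so that the caller descends
- * into it.
- */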
-static int btrfsic_create_link_to_next_block(
- struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- u64 next_bytenr,
- int limit_nesting,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block **next_blockp,
- int force_iodone_flag,
- int *num_copiesp, int *mirror_nump,
- struct btrfs_disk_key *disk_key,
- u64 parent_generation)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfsic_block *next_block = NULL;
- int ret;
- struct btrfsic_block_link *l;
- int did_alloc_block_link;
- int block_was_created;
-
- *next_blockp = NULL;
- if (0 == *num_copiesp) {
- *num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
- state->metablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, *num_copiesp);
- *mirror_nump = 1;
- }
-
- if (*mirror_nump > *num_copiesp)
- return 0;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic_create_link_to_next_block(mirror_num=%d)\n",
- *mirror_nump);
- ret = btrfsic_map_block(state, next_bytenr,
- state->metablock_size,
- next_block_ctx, *mirror_nump);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
- next_bytenr, *mirror_nump);
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
-
- next_block = btrfsic_block_lookup_or_add(state,
- next_block_ctx, "referenced ",
- 1, force_iodone_flag,
- !force_iodone_flag,
- *mirror_nump,
- &block_was_created);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
- if (block_was_created) {
- l = NULL;
- next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
- } else {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
- if (next_block->logical_bytenr != next_bytenr &&
- !(!next_block->is_metadata &&
- 0 == next_block->logical_bytenr))
- pr_info(
-"referenced block @%llu (%pg/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu)\n",
- next_bytenr, next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr, *mirror_nump,
- btrfsic_get_block_type(state,
- next_block),
- next_block->logical_bytenr);
- else
- pr_info(
- "referenced block @%llu (%pg/%llu/%d) found in hash table, %c\n",
- next_bytenr, next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr, *mirror_nump,
- btrfsic_get_block_type(state,
- next_block));
- }
- next_block->logical_bytenr = next_bytenr;
-
- next_block->mirror_num = *mirror_nump;
- l = btrfsic_block_link_hashtable_lookup(
- next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr,
- block_ctx->dev->bdev,
- block_ctx->dev_bytenr,
- &state->block_link_hashtable);
- }
-
- next_block->disk_key = *disk_key;
- if (NULL == l) {
- l = btrfsic_block_link_alloc();
- if (NULL == l) {
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
-
- did_alloc_block_link = 1;
- l->block_ref_to = next_block;
- l->block_ref_from = block;
- l->ref_cnt = 1;
- l->parent_generation = parent_generation;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
-
- list_add(&l->node_ref_to, &block->ref_to_list);
- list_add(&l->node_ref_from, &next_block->ref_from_list);
-
- btrfsic_block_link_hashtable_add(l,
- &state->block_link_hashtable);
- } else {
- did_alloc_block_link = 0;
- if (0 == limit_nesting) {
- l->ref_cnt++;
- l->parent_generation = parent_generation;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
- }
- }
-
- if (limit_nesting > 0 && did_alloc_block_link) {
- ret = btrfsic_read_block(state, next_block_ctx);
- if (ret < (int)next_block_ctx->len) {
- pr_info("btrfsic: read block @logical %llu failed!\n",
- next_bytenr);
- btrfsic_release_block_ctx(next_block_ctx);
- *next_blockp = NULL;
- return -1;
- }
-
- *next_blockp = next_block;
- } else {
- *next_blockp = NULL;
- }
- (*mirror_nump)++;
-
- return 0;
-}
-
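-/*
- * Parse a file extent item in a leaf and create links from the metadata
- * block to all mirrors of each data block that the regular extent covers.
- */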
-static int btrfsic_handle_extent_data(
- struct btrfsic_state *state,
- struct btrfsic_block *block,
- struct btrfsic_block_data_ctx *block_ctx,
- u32 item_offset, int force_iodone_flag)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfs_file_extent_item file_extent_item;
- u64 file_extent_item_offset;
- u64 next_bytenr;
- u64 num_bytes;
- u64 generation;
- struct btrfsic_block_link *l;
- int ret;
-
- file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
- item_offset;
- if (file_extent_item_offset +
- offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
- block_ctx->len) {
- pr_info("btrfsic: file item out of bounce at logical %llu, dev %pg\n",
- block_ctx->start, block_ctx->dev->bdev);
- return -1;
- }
-
- btrfsic_read_from_block_data(block_ctx, &file_extent_item,
- file_extent_item_offset,
- offsetof(struct btrfs_file_extent_item, disk_num_bytes));
- if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
- btrfs_stack_file_extent_disk_bytenr(&file_extent_item) == 0) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- pr_info("extent_data: type %u, disk_bytenr = %llu\n",
- file_extent_item.type,
- btrfs_stack_file_extent_disk_bytenr(
- &file_extent_item));
- return 0;
- }
-
- if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
- block_ctx->len) {
- pr_info("btrfsic: file item out of bounce at logical %llu, dev %pg\n",
- block_ctx->start, block_ctx->dev->bdev);
- return -1;
- }
- btrfsic_read_from_block_data(block_ctx, &file_extent_item,
- file_extent_item_offset,
- sizeof(struct btrfs_file_extent_item));
- next_bytenr = btrfs_stack_file_extent_disk_bytenr(&file_extent_item);
- if (btrfs_stack_file_extent_compression(&file_extent_item) ==
- BTRFS_COMPRESS_NONE) {
- next_bytenr += btrfs_stack_file_extent_offset(&file_extent_item);
- num_bytes = btrfs_stack_file_extent_num_bytes(&file_extent_item);
- } else {
- num_bytes = btrfs_stack_file_extent_disk_num_bytes(&file_extent_item);
- }
- generation = btrfs_stack_file_extent_generation(&file_extent_item);
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- pr_info("extent_data: type %u, disk_bytenr = %llu, offset = %llu, num_bytes = %llu\n",
- file_extent_item.type,
- btrfs_stack_file_extent_disk_bytenr(&file_extent_item),
- btrfs_stack_file_extent_offset(&file_extent_item),
- num_bytes);
- while (num_bytes > 0) {
- u32 chunk_len;
- int num_copies;
- int mirror_num;
-
- if (num_bytes > state->datablock_size)
- chunk_len = state->datablock_size;
- else
- chunk_len = num_bytes;
-
- num_copies = btrfs_num_copies(fs_info, next_bytenr,
- state->datablock_size);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- struct btrfsic_block_data_ctx next_block_ctx;
- struct btrfsic_block *next_block;
- int block_was_created;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic_handle_extent_data(mirror_num=%d)\n",
- mirror_num);
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
- pr_info("\tdisk_bytenr = %llu, num_bytes %u\n",
- next_bytenr, chunk_len);
- ret = btrfsic_map_block(state, next_bytenr,
- chunk_len, &next_block_ctx,
- mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
- next_bytenr, mirror_num);
- return -1;
- }
-
- next_block = btrfsic_block_lookup_or_add(
- state,
- &next_block_ctx,
- "referenced ",
- 0,
- force_iodone_flag,
- !force_iodone_flag,
- mirror_num,
- &block_was_created);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(&next_block_ctx);
- return -1;
- }
- if (!block_was_created) {
- if ((state->print_mask &
- BTRFSIC_PRINT_MASK_VERBOSE) &&
- next_block->logical_bytenr != next_bytenr &&
- !(!next_block->is_metadata &&
- 0 == next_block->logical_bytenr)) {
- pr_info(
-"referenced block @%llu (%pg/%llu/%d) found in hash table, D, bytenr mismatch (!= stored %llu)\n",
- next_bytenr,
- next_block_ctx.dev->bdev,
- next_block_ctx.dev_bytenr,
- mirror_num,
- next_block->logical_bytenr);
- }
- next_block->logical_bytenr = next_bytenr;
- next_block->mirror_num = mirror_num;
- }
-
- l = btrfsic_block_link_lookup_or_add(state,
- &next_block_ctx,
- next_block, block,
- generation);
- btrfsic_release_block_ctx(&next_block_ctx);
- if (NULL == l)
- return -1;
- }
-
- next_bytenr += chunk_len;
- num_bytes -= chunk_len;
- }
-
- return 0;
-}
-
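-/*
- * Map a logical byte number to its physical location on one mirror and
- * fill in the block context with the device state and physical bytenr.
- */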
-static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
- struct btrfsic_block_data_ctx *block_ctx_out,
- int mirror_num)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- int ret;
- u64 length;
- struct btrfs_io_context *bioc = NULL;
- struct btrfs_io_stripe smap, *map;
- struct btrfs_device *device;
-
- length = len;
- ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, bytenr, &length, &bioc,
- NULL, &mirror_num, 0);
- if (ret) {
- block_ctx_out->start = 0;
- block_ctx_out->dev_bytenr = 0;
- block_ctx_out->len = 0;
- block_ctx_out->dev = NULL;
- block_ctx_out->datav = NULL;
- block_ctx_out->pagev = NULL;
- block_ctx_out->mem_to_free = NULL;
-
- return ret;
- }
-
- if (bioc)
- map = &bioc->stripes[0];
- else
- map = &smap;
-
- device = map->dev;
- if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
- !device->bdev || !device->name)
- block_ctx_out->dev = NULL;
- else
- block_ctx_out->dev = btrfsic_dev_state_lookup(
- device->bdev->bd_dev);
- block_ctx_out->dev_bytenr = map->physical;
- block_ctx_out->start = bytenr;
- block_ctx_out->len = len;
- block_ctx_out->datav = NULL;
- block_ctx_out->pagev = NULL;
- block_ctx_out->mem_to_free = NULL;
-
- kfree(bioc);
- if (NULL == block_ctx_out->dev) {
- ret = -ENXIO;
- pr_info("btrfsic: error, cannot lookup dev (#1)!\n");
- }
-
- return ret;
-}
-
-static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
-{
- if (block_ctx->mem_to_free) {
- unsigned int num_pages;
-
- BUG_ON(!block_ctx->datav);
- BUG_ON(!block_ctx->pagev);
- num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- /* Pages must be released in reverse order */
- while (num_pages > 0) {
- num_pages--;
- if (block_ctx->datav[num_pages])
- block_ctx->datav[num_pages] = NULL;
- if (block_ctx->pagev[num_pages]) {
- __free_page(block_ctx->pagev[num_pages]);
- block_ctx->pagev[num_pages] = NULL;
- }
- }
-
- kfree(block_ctx->mem_to_free);
- block_ctx->mem_to_free = NULL;
- block_ctx->pagev = NULL;
- block_ctx->datav = NULL;
- }
-}
-
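-/*
- * Allocate the page vector for a block context and read the block
- * synchronously from the physical location that was mapped before.
- * Returns the number of bytes read (block_ctx->len) on success.
- */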
-static int btrfsic_read_block(struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx)
-{
- unsigned int num_pages;
- unsigned int i;
- size_t size;
- u64 dev_bytenr;
- int ret;
-
- BUG_ON(block_ctx->datav);
- BUG_ON(block_ctx->pagev);
- BUG_ON(block_ctx->mem_to_free);
- if (!PAGE_ALIGNED(block_ctx->dev_bytenr)) {
- pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
- block_ctx->dev_bytenr);
- return -1;
- }
-
- num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- size = sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev);
- block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS);
- if (!block_ctx->mem_to_free)
- return -ENOMEM;
- block_ctx->datav = block_ctx->mem_to_free;
- block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
- ret = btrfs_alloc_page_array(num_pages, block_ctx->pagev);
- if (ret)
- return ret;
-
- dev_bytenr = block_ctx->dev_bytenr;
- for (i = 0; i < num_pages;) {
- struct bio *bio;
- unsigned int j;
-
- bio = bio_alloc(block_ctx->dev->bdev, num_pages - i,
- REQ_OP_READ, GFP_NOFS);
- bio->bi_iter.bi_sector = dev_bytenr >> SECTOR_SHIFT;
-
- for (j = i; j < num_pages; j++) {
- ret = bio_add_page(bio, block_ctx->pagev[j],
- PAGE_SIZE, 0);
- if (PAGE_SIZE != ret)
- break;
- }
- if (j == i) {
- pr_info("btrfsic: error, failed to add a single page!\n");
- return -1;
- }
- if (submit_bio_wait(bio)) {
- pr_info("btrfsic: read error at logical %llu dev %pg!\n",
- block_ctx->start, block_ctx->dev->bdev);
- bio_put(bio);
- return -1;
- }
- bio_put(bio);
- dev_bytenr += (j - i) * PAGE_SIZE;
- i = j;
- }
- for (i = 0; i < num_pages; i++)
- block_ctx->datav[i] = page_address(block_ctx->pagev[i]);
-
- return block_ctx->len;
-}
-
-static void btrfsic_dump_database(struct btrfsic_state *state)
-{
- const struct btrfsic_block *b_all;
-
- BUG_ON(NULL == state);
-
- pr_info("all_blocks_list:\n");
- list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
- const struct btrfsic_block_link *l;
-
- pr_info("%c-block @%llu (%pg/%llu/%d)\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num);
-
- list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
- pr_info(
- " %c @%llu (%pg/%llu/%d) refers %u* to %c @%llu (%pg/%llu/%d)\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- }
-
- list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
- pr_info(
- " %c @%llu (%pg/%llu/%d) is ref %u* from %c @%llu (%pg/%llu/%d)\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr,
- l->block_ref_from->mirror_num);
- }
-
- pr_info("\n");
- }
-}
-
-/*
- * Test whether the disk block contains a tree block (leaf or node)
- * (note that this test fails for the super block)
- */
-static noinline_for_stack int btrfsic_test_for_metadata(
- struct btrfsic_state *state,
- char **datav, unsigned int num_pages)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
- struct btrfs_header *h;
- u8 csum[BTRFS_CSUM_SIZE];
- unsigned int i;
-
- if (num_pages * PAGE_SIZE < state->metablock_size)
- return 1; /* not metadata */
- num_pages = state->metablock_size >> PAGE_SHIFT;
- h = (struct btrfs_header *)datav[0];
-
- if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE))
- return 1;
-
- shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
-
- for (i = 0; i < num_pages; i++) {
- u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
- size_t sublen = i ? PAGE_SIZE :
- (PAGE_SIZE - BTRFS_CSUM_SIZE);
-
- crypto_shash_update(shash, data, sublen);
- }
- crypto_shash_final(shash, csum);
- if (memcmp(csum, h->csum, fs_info->csum_size))
- return 1;
-
- return 0; /* is metadata */
-}
-
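-/*
- * Process a block that is being written: classify it as superblock,
- * metadata or data, complain if it is still referenced by the most
- * recent superblock or if its previous IO has not completed, then drop
- * the old linkage and rebuild it from the newly written content. The
- * bio is patched so that btrfsic_bio_end_io() can record IO completion
- * for each block.
- */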
-static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
- u64 dev_bytenr, char **mapped_datav,
- unsigned int num_pages,
- struct bio *bio, int *bio_is_patched,
- blk_opf_t submit_bio_bh_rw)
-{
- int is_metadata;
- struct btrfsic_block *block;
- struct btrfsic_block_data_ctx block_ctx;
- int ret;
- struct btrfsic_state *state = dev_state->state;
- struct block_device *bdev = dev_state->bdev;
- unsigned int processed_len;
-
- if (NULL != bio_is_patched)
- *bio_is_patched = 0;
-
-again:
- if (num_pages == 0)
- return;
-
- processed_len = 0;
- is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
- num_pages));
-
- block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
- &state->block_hashtable);
- if (NULL != block) {
- u64 bytenr = 0;
- struct btrfsic_block_link *l, *tmp;
-
- if (block->is_superblock) {
- bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
- mapped_datav[0]);
- if (num_pages * PAGE_SIZE <
- BTRFS_SUPER_INFO_SIZE) {
- pr_info("btrfsic: cannot work with too short bios!\n");
- return;
- }
- is_metadata = 1;
- BUG_ON(!PAGE_ALIGNED(BTRFS_SUPER_INFO_SIZE));
- processed_len = BTRFS_SUPER_INFO_SIZE;
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
- pr_info("[before new superblock is written]:\n");
- btrfsic_dump_tree_sub(state, block, 0);
- }
- }
- if (is_metadata) {
- if (!block->is_superblock) {
- if (num_pages * PAGE_SIZE <
- state->metablock_size) {
- pr_info("btrfsic: cannot work with too short bios!\n");
- return;
- }
- processed_len = state->metablock_size;
- bytenr = btrfs_stack_header_bytenr(
- (struct btrfs_header *)
- mapped_datav[0]);
- btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
- dev_state,
- dev_bytenr);
- }
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
- if (block->logical_bytenr != bytenr &&
- !(!block->is_metadata &&
- block->logical_bytenr == 0))
- pr_info(
-"written block @%llu (%pg/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu)\n",
- bytenr, dev_state->bdev,
- dev_bytenr,
- block->mirror_num,
- btrfsic_get_block_type(state,
- block),
- block->logical_bytenr);
- else
- pr_info(
- "written block @%llu (%pg/%llu/%d) found in hash table, %c\n",
- bytenr, dev_state->bdev,
- dev_bytenr, block->mirror_num,
- btrfsic_get_block_type(state,
- block));
- }
- block->logical_bytenr = bytenr;
- } else {
- if (num_pages * PAGE_SIZE <
- state->datablock_size) {
- pr_info("btrfsic: cannot work with too short bios!\n");
- return;
- }
- processed_len = state->datablock_size;
- bytenr = block->logical_bytenr;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "written block @%llu (%pg/%llu/%d) found in hash table, %c\n",
- bytenr, dev_state->bdev, dev_bytenr,
- block->mirror_num,
- btrfsic_get_block_type(state, block));
- }
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("ref_to_list: %cE, ref_from_list: %cE\n",
- list_empty(&block->ref_to_list) ? ' ' : '!',
- list_empty(&block->ref_from_list) ? ' ' : '!');
- if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
- pr_info(
-"btrfs: attempt to overwrite %c-block @%llu (%pg/%llu/%d), old(gen=%llu, objectid=%llu, type=%d, offset=%llu), new(gen=%llu), which is referenced by most recent superblock (superblockgen=%llu)!\n",
- btrfsic_get_block_type(state, block), bytenr,
- dev_state->bdev, dev_bytenr, block->mirror_num,
- block->generation,
- btrfs_disk_key_objectid(&block->disk_key),
- block->disk_key.type,
- btrfs_disk_key_offset(&block->disk_key),
- btrfs_stack_header_generation(
- (struct btrfs_header *) mapped_datav[0]),
- state->max_superblock_generation);
- btrfsic_dump_tree(state);
- }
-
- if (!block->is_iodone && !block->never_written) {
- pr_info(
-"btrfs: attempt to overwrite %c-block @%llu (%pg/%llu/%d), oldgen=%llu, newgen=%llu, which is not yet iodone!\n",
- btrfsic_get_block_type(state, block), bytenr,
- dev_state->bdev, dev_bytenr, block->mirror_num,
- block->generation,
- btrfs_stack_header_generation(
- (struct btrfs_header *)
- mapped_datav[0]));
- /* it would not be safe to go on */
- btrfsic_dump_tree(state);
- goto continue_loop;
- }
-
- /*
- * Clear all references of this block. Do not free
- * the block itself even if it is not referenced anymore
- * because it still carries valuable information
- * like whether it was ever written and IO completed.
- */
- list_for_each_entry_safe(l, tmp, &block->ref_to_list,
- node_ref_to) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_rem_link(state, l);
- l->ref_cnt--;
- if (0 == l->ref_cnt) {
- list_del(&l->node_ref_to);
- list_del(&l->node_ref_from);
- btrfsic_block_link_hashtable_remove(l);
- btrfsic_block_link_free(l);
- }
- }
-
- block_ctx.dev = dev_state;
- block_ctx.dev_bytenr = dev_bytenr;
- block_ctx.start = bytenr;
- block_ctx.len = processed_len;
- block_ctx.pagev = NULL;
- block_ctx.mem_to_free = NULL;
- block_ctx.datav = mapped_datav;
-
- if (is_metadata || state->include_extent_data) {
- block->never_written = 0;
- block->iodone_w_error = 0;
- if (NULL != bio) {
- block->is_iodone = 0;
- BUG_ON(NULL == bio_is_patched);
- if (!*bio_is_patched) {
- block->orig_bio_private =
- bio->bi_private;
- block->orig_bio_end_io =
- bio->bi_end_io;
- block->next_in_same_bio = NULL;
- bio->bi_private = block;
- bio->bi_end_io = btrfsic_bio_end_io;
- *bio_is_patched = 1;
- } else {
- struct btrfsic_block *chained_block =
- (struct btrfsic_block *)
- bio->bi_private;
-
- BUG_ON(NULL == chained_block);
- block->orig_bio_private =
- chained_block->orig_bio_private;
- block->orig_bio_end_io =
- chained_block->orig_bio_end_io;
- block->next_in_same_bio = chained_block;
- bio->bi_private = block;
- }
- } else {
- block->is_iodone = 1;
- block->orig_bio_private = NULL;
- block->orig_bio_end_io = NULL;
- block->next_in_same_bio = NULL;
- }
- }
-
- block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = submit_bio_bh_rw;
- if (is_metadata) {
- block->logical_bytenr = bytenr;
- block->is_metadata = 1;
- if (block->is_superblock) {
- BUG_ON(PAGE_SIZE !=
- BTRFS_SUPER_INFO_SIZE);
- ret = btrfsic_process_written_superblock(
- state,
- block,
- (struct btrfs_super_block *)
- mapped_datav[0]);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
- pr_info("[after new superblock is written]:\n");
- btrfsic_dump_tree_sub(state, block, 0);
- }
- } else {
- block->mirror_num = 0; /* unknown */
- ret = btrfsic_process_metablock(
- state,
- block,
- &block_ctx,
- 0, 0);
- }
- if (ret)
- pr_info("btrfsic: btrfsic_process_metablock(root @%llu) failed!\n",
- dev_bytenr);
- } else {
- block->is_metadata = 0;
- block->mirror_num = 0; /* unknown */
- block->generation = BTRFSIC_GENERATION_UNKNOWN;
- if (!state->include_extent_data
- && list_empty(&block->ref_from_list)) {
- /*
- * The disk block is overwritten with extent
- * data (not metadata) and we are configured
- * not to include extent data: take the
- * chance and free the block's memory.
- */
- btrfsic_block_hashtable_remove(block);
- list_del(&block->all_blocks_node);
- btrfsic_block_free(block);
- }
- }
- btrfsic_release_block_ctx(&block_ctx);
- } else {
- /* block has not been found in hash table */
- u64 bytenr;
-
- if (!is_metadata) {
- processed_len = state->datablock_size;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "written block (%pg/%llu/?) !found in hash table, D\n",
- dev_state->bdev, dev_bytenr);
- if (!state->include_extent_data) {
- /* ignore that written D block */
- goto continue_loop;
- }
-
- /*
- * This is getting ugly for the include_extent_data
- * case: the logical bytenr of this data block is unknown.
- */
- bytenr = 0; /* unknown */
- } else {
- processed_len = state->metablock_size;
- bytenr = btrfs_stack_header_bytenr(
- (struct btrfs_header *)
- mapped_datav[0]);
- btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
- dev_bytenr);
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "written block @%llu (%pg/%llu/?) !found in hash table, M\n",
- bytenr, dev_state->bdev, dev_bytenr);
- }
-
- block_ctx.dev = dev_state;
- block_ctx.dev_bytenr = dev_bytenr;
- block_ctx.start = bytenr;
- block_ctx.len = processed_len;
- block_ctx.pagev = NULL;
- block_ctx.mem_to_free = NULL;
- block_ctx.datav = mapped_datav;
-
- block = btrfsic_block_alloc();
- if (NULL == block) {
- btrfsic_release_block_ctx(&block_ctx);
- goto continue_loop;
- }
- block->dev_state = dev_state;
- block->dev_bytenr = dev_bytenr;
- block->logical_bytenr = bytenr;
- block->is_metadata = is_metadata;
- block->never_written = 0;
- block->iodone_w_error = 0;
- block->mirror_num = 0; /* unknown */
- block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = submit_bio_bh_rw;
- if (NULL != bio) {
- block->is_iodone = 0;
- BUG_ON(NULL == bio_is_patched);
- if (!*bio_is_patched) {
- block->orig_bio_private = bio->bi_private;
- block->orig_bio_end_io = bio->bi_end_io;
- block->next_in_same_bio = NULL;
- bio->bi_private = block;
- bio->bi_end_io = btrfsic_bio_end_io;
- *bio_is_patched = 1;
- } else {
- struct btrfsic_block *chained_block =
- (struct btrfsic_block *)
- bio->bi_private;
-
- BUG_ON(NULL == chained_block);
- block->orig_bio_private =
- chained_block->orig_bio_private;
- block->orig_bio_end_io =
- chained_block->orig_bio_end_io;
- block->next_in_same_bio = chained_block;
- bio->bi_private = block;
- }
- } else {
- block->is_iodone = 1;
- block->orig_bio_private = NULL;
- block->orig_bio_end_io = NULL;
- block->next_in_same_bio = NULL;
- }
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("new written %c-block @%llu (%pg/%llu/%d)\n",
- is_metadata ? 'M' : 'D',
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num);
- list_add(&block->all_blocks_node, &state->all_blocks_list);
- btrfsic_block_hashtable_add(block, &state->block_hashtable);
-
- if (is_metadata) {
- ret = btrfsic_process_metablock(state, block,
- &block_ctx, 0, 0);
- if (ret)
- pr_info("btrfsic: process_metablock(root @%llu) failed!\n",
- dev_bytenr);
- }
- btrfsic_release_block_ctx(&block_ctx);
- }
-
-continue_loop:
- BUG_ON(!processed_len);
- dev_bytenr += processed_len;
- mapped_datav += processed_len >> PAGE_SHIFT;
- num_pages -= processed_len >> PAGE_SHIFT;
- goto again;
-}
-
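-/*
- * Replacement bio end_io handler: mark every block that was part of this
- * bio as iodone, bump the flush generation on REQ_PREFLUSH completion,
- * then restore and call the original end_io handler.
- */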
-static void btrfsic_bio_end_io(struct bio *bp)
-{
- struct btrfsic_block *block = bp->bi_private;
- int iodone_w_error;
-
- /*
- * The mutex is not held! This is not safe if IO has not yet
- * completed on umount.
- */
- iodone_w_error = 0;
- if (bp->bi_status)
- iodone_w_error = 1;
-
- BUG_ON(NULL == block);
- bp->bi_private = block->orig_bio_private;
- bp->bi_end_io = block->orig_bio_end_io;
-
- do {
- struct btrfsic_block *next_block;
- struct btrfsic_dev_state *const dev_state = block->dev_state;
-
- if ((dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- pr_info("bio_end_io(err=%d) for %c @%llu (%pg/%llu/%d)\n",
- bp->bi_status,
- btrfsic_get_block_type(dev_state->state, block),
- block->logical_bytenr, dev_state->bdev,
- block->dev_bytenr, block->mirror_num);
- next_block = block->next_in_same_bio;
- block->iodone_w_error = iodone_w_error;
- if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
- dev_state->last_flush_gen++;
- if ((dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
- pr_info("bio_end_io() new %pg flush_gen=%llu\n",
- dev_state->bdev,
- dev_state->last_flush_gen);
- }
- if (block->submit_bio_bh_rw & REQ_FUA)
- block->flush_gen = 0; /* FUA completed means block is
- * on disk */
- block->is_iodone = 1; /* for FLUSH, this releases the block */
- block = next_block;
- } while (NULL != block);
-
- bp->bi_end_io(bp);
-}
-
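-/*
- * A new superblock has been written: remember it if its generation is
- * the highest seen so far, link it to the root, chunk and log trees it
- * points to and verify that everything it references has safely reached
- * the disk.
- */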
-static int btrfsic_process_written_superblock(
- struct btrfsic_state *state,
- struct btrfsic_block *const superblock,
- struct btrfs_super_block *const super_hdr)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- int pass;
-
- superblock->generation = btrfs_super_generation(super_hdr);
- if (!(superblock->generation > state->max_superblock_generation ||
- 0 == state->max_superblock_generation)) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- pr_info(
- "btrfsic: superblock @%llu (%pg/%llu/%d) with old gen %llu <= %llu\n",
- superblock->logical_bytenr,
- superblock->dev_state->bdev,
- superblock->dev_bytenr, superblock->mirror_num,
- btrfs_super_generation(super_hdr),
- state->max_superblock_generation);
- } else {
- if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
- pr_info(
- "btrfsic: got new superblock @%llu (%pg/%llu/%d) with new gen %llu > %llu\n",
- superblock->logical_bytenr,
- superblock->dev_state->bdev,
- superblock->dev_bytenr, superblock->mirror_num,
- btrfs_super_generation(super_hdr),
- state->max_superblock_generation);
-
- state->max_superblock_generation =
- btrfs_super_generation(super_hdr);
- state->latest_superblock = superblock;
- }
-
- for (pass = 0; pass < 3; pass++) {
- int ret;
- u64 next_bytenr;
- struct btrfsic_block *next_block;
- struct btrfsic_block_data_ctx tmp_next_block_ctx;
- struct btrfsic_block_link *l;
- int num_copies;
- int mirror_num;
- const char *additional_string = NULL;
- struct btrfs_disk_key tmp_disk_key = {0};
-
- btrfs_set_disk_key_type(&tmp_disk_key,
- BTRFS_ROOT_ITEM_KEY);
- btrfs_set_disk_key_offset(&tmp_disk_key, 0);
-
- switch (pass) {
- case 0:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_ROOT_TREE_OBJECTID);
- additional_string = "root ";
- next_bytenr = btrfs_super_root(super_hdr);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("root@%llu\n", next_bytenr);
- break;
- case 1:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_CHUNK_TREE_OBJECTID);
- additional_string = "chunk ";
- next_bytenr = btrfs_super_chunk_root(super_hdr);
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("chunk@%llu\n", next_bytenr);
- break;
- case 2:
- btrfs_set_disk_key_objectid(&tmp_disk_key,
- BTRFS_TREE_LOG_OBJECTID);
- additional_string = "log ";
- next_bytenr = btrfs_super_log_root(super_hdr);
- if (0 == next_bytenr)
- continue;
- if (state->print_mask &
- BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
- pr_info("log@%llu\n", next_bytenr);
- break;
- }
-
- num_copies = btrfs_num_copies(fs_info, next_bytenr,
- BTRFS_SUPER_INFO_SIZE);
- if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
- pr_info("num_copies(log_bytenr=%llu) = %d\n",
- next_bytenr, num_copies);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- int was_created;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic_process_written_superblock(mirror_num=%d)\n", mirror_num);
- ret = btrfsic_map_block(state, next_bytenr,
- BTRFS_SUPER_INFO_SIZE,
- &tmp_next_block_ctx,
- mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
- next_bytenr, mirror_num);
- return -1;
- }
-
- next_block = btrfsic_block_lookup_or_add(
- state,
- &tmp_next_block_ctx,
- additional_string,
- 1, 0, 1,
- mirror_num,
- &was_created);
- if (NULL == next_block) {
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- return -1;
- }
-
- next_block->disk_key = tmp_disk_key;
- if (was_created)
- next_block->generation =
- BTRFSIC_GENERATION_UNKNOWN;
- l = btrfsic_block_link_lookup_or_add(
- state,
- &tmp_next_block_ctx,
- next_block,
- superblock,
- BTRFSIC_GENERATION_UNKNOWN);
- btrfsic_release_block_ctx(&tmp_next_block_ctx);
- if (NULL == l)
- return -1;
- }
- }
-
- if (WARN_ON(-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)))
- btrfsic_dump_tree(state);
-
- return 0;
-}
-
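-/*
- * Recursively verify that all blocks reachable from this block have been
- * written, completed without error, carry the expected generation and
- * are flushed out of the disk's write cache. Returns -1 on violation.
- */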
-static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
- struct btrfsic_block *const block,
- int recursion_level)
-{
- const struct btrfsic_block_link *l;
- int ret = 0;
-
- if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
- /*
- * Note that this situation can happen and does not
- * indicate an error in regular cases. It happens
- * when disk blocks are freed and later reused.
- * The check-integrity module is not aware of any
- * block free operations, it just recognizes block
- * write operations. Therefore it keeps the linkage
- * information for a block until a block is
- * rewritten. This can temporarily cause incorrect
- * and even circular linkage information. This
- * causes no harm unless such blocks are referenced
- * by the most recent super block.
- */
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic: abort cyclic linkage (case 1).\n");
-
- return ret;
- }
-
- /*
- * This algorithm is recursive because the amount of used stack
- * space is very small and the max recursion depth is limited.
- */
- list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "rl=%d, %c @%llu (%pg/%llu/%d) %u* refers to %c @%llu (%pg/%llu/%d)\n",
- recursion_level,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- if (l->block_ref_to->never_written) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is never written!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- ret = -1;
- } else if (!l->block_ref_to->is_iodone) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is not yet iodone!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- ret = -1;
- } else if (l->block_ref_to->iodone_w_error) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which has write error!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
- ret = -1;
- } else if (l->parent_generation !=
- l->block_ref_to->generation &&
- BTRFSIC_GENERATION_UNKNOWN !=
- l->parent_generation &&
- BTRFSIC_GENERATION_UNKNOWN !=
- l->block_ref_to->generation) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) with generation %llu != parent generation %llu!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num,
- l->block_ref_to->generation,
- l->parent_generation);
- ret = -1;
- } else if (l->block_ref_to->flush_gen >
- l->block_ref_to->dev_state->last_flush_gen) {
- pr_info(
-"btrfs: attempt to write superblock which references block %c @%llu (%pg/%llu/%d) which is not flushed out of disk's write cache (block flush_gen=%llu, dev->flush_gen=%llu)!\n",
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev,
- l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num, l->block_ref_to->flush_gen,
- l->block_ref_to->dev_state->last_flush_gen);
- ret = -1;
- } else if (-1 == btrfsic_check_all_ref_blocks(state,
- l->block_ref_to,
- recursion_level +
- 1)) {
- ret = -1;
- }
- }
-
- return ret;
-}
-
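-/*
- * Recursively test whether this block is directly or indirectly
- * referenced by the most recent superblock.
- */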
-static int btrfsic_is_block_ref_by_superblock(
- const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int recursion_level)
-{
- const struct btrfsic_block_link *l;
-
- if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
- /* refer to comment at "abort cyclic linkage (case 1)" */
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("btrfsic: abort cyclic linkage (case 2).\n");
-
- return 0;
- }
-
- /*
- * This algorithm is recursive because the amount of used stack space
- * is very small and the max recursion depth is limited.
- */
- list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info(
- "rl=%d, %c @%llu (%pg/%llu/%d) is ref %u* from %c @%llu (%pg/%llu/%d)\n",
- recursion_level,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num,
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr,
- l->block_ref_from->mirror_num);
- if (l->block_ref_from->is_superblock &&
- state->latest_superblock->dev_bytenr ==
- l->block_ref_from->dev_bytenr &&
- state->latest_superblock->dev_state->bdev ==
- l->block_ref_from->dev_state->bdev)
- return 1;
- else if (btrfsic_is_block_ref_by_superblock(state,
- l->block_ref_from,
- recursion_level +
- 1))
- return 1;
- }
-
- return 0;
-}
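
Both walkers above recurse with an explicit depth cap of 3 + BTRFS_MAX_LEVEL and treat hitting the cap as "no result", so a cyclic reference graph can neither overflow the kernel stack nor loop forever. A minimal user-space sketch of that guard, with invented node types (nothing here is btrfs API):

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_DEPTH 11	/* stands in for 3 + BTRFS_MAX_LEVEL in the code above */

    struct node {
    	bool is_super;
    	size_t nr_refs_from;
    	struct node **refs_from;
    };

    /* Return true if some ancestor within MAX_DEPTH levels is the superblock. */
    static bool ref_by_super(const struct node *n, int depth)
    {
    	if (depth >= MAX_DEPTH)
    		return false;	/* abort cyclic linkage, as in "case 2" above */

    	for (size_t i = 0; i < n->nr_refs_from; i++) {
    		const struct node *from = n->refs_from[i];

    		if (from->is_super || ref_by_super(from, depth + 1))
    			return true;
    	}
    	return false;
    }
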
-
-static void btrfsic_print_add_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l)
-{
- pr_info("add %u* link from %c @%llu (%pg/%llu/%d) to %c @%llu (%pg/%llu/%d)\n",
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev, l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
-}
-
-static void btrfsic_print_rem_link(const struct btrfsic_state *state,
- const struct btrfsic_block_link *l)
-{
- pr_info("rem %u* link from %c @%llu (%pg/%llu/%d) to %c @%llu (%pg/%llu/%d)\n",
- l->ref_cnt,
- btrfsic_get_block_type(state, l->block_ref_from),
- l->block_ref_from->logical_bytenr,
- l->block_ref_from->dev_state->bdev,
- l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num,
- btrfsic_get_block_type(state, l->block_ref_to),
- l->block_ref_to->logical_bytenr,
- l->block_ref_to->dev_state->bdev, l->block_ref_to->dev_bytenr,
- l->block_ref_to->mirror_num);
-}
-
-static char btrfsic_get_block_type(const struct btrfsic_state *state,
- const struct btrfsic_block *block)
-{
- if (block->is_superblock &&
- state->latest_superblock->dev_bytenr == block->dev_bytenr &&
- state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
- return 'S';
- else if (block->is_superblock)
- return 's';
- else if (block->is_metadata)
- return 'M';
- else
- return 'D';
-}
-
-static void btrfsic_dump_tree(const struct btrfsic_state *state)
-{
- btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
-}
-
-static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
- const struct btrfsic_block *block,
- int indent_level)
-{
- const struct btrfsic_block_link *l;
- int indent_add;
- static char buf[80];
- int cursor_position;
-
- /*
- * Should better fill an on-stack buffer with a complete line and
- * dump it at once when it is time to print a newline character.
- */
-
- /*
- * This algorithm is recursive because the amount of used stack space
- * is very small and the max recursion depth is limited.
- */
- indent_add = sprintf(buf, "%c-%llu(%pg/%llu/%u)",
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, block->dev_state->bdev,
- block->dev_bytenr, block->mirror_num);
- if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
- printk("[...]\n");
- return;
- }
- printk(buf);
- indent_level += indent_add;
- if (list_empty(&block->ref_to_list)) {
- printk("\n");
- return;
- }
- if (block->mirror_num > 1 &&
- !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
- printk(" [...]\n");
- return;
- }
-
- cursor_position = indent_level;
- list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
- while (cursor_position < indent_level) {
- printk(" ");
- cursor_position++;
- }
- if (l->ref_cnt > 1)
- indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
- else
- indent_add = sprintf(buf, " --> ");
- if (indent_level + indent_add >
- BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
- printk("[...]\n");
- cursor_position = 0;
- continue;
- }
-
- printk(buf);
-
- btrfsic_dump_tree_sub(state, l->block_ref_to,
- indent_level + indent_add);
- cursor_position = 0;
- }
-}
-
-static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *next_block_ctx,
- struct btrfsic_block *next_block,
- struct btrfsic_block *from_block,
- u64 parent_generation)
-{
- struct btrfsic_block_link *l;
-
- l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
- next_block_ctx->dev_bytenr,
- from_block->dev_state->bdev,
- from_block->dev_bytenr,
- &state->block_link_hashtable);
- if (NULL == l) {
- l = btrfsic_block_link_alloc();
- if (!l)
- return NULL;
-
- l->block_ref_to = next_block;
- l->block_ref_from = from_block;
- l->ref_cnt = 1;
- l->parent_generation = parent_generation;
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
-
- list_add(&l->node_ref_to, &from_block->ref_to_list);
- list_add(&l->node_ref_from, &next_block->ref_from_list);
-
- btrfsic_block_link_hashtable_add(l,
- &state->block_link_hashtable);
- } else {
- l->ref_cnt++;
- l->parent_generation = parent_generation;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_add_link(state, l);
- }
-
- return l;
-}
-
-static struct btrfsic_block *btrfsic_block_lookup_or_add(
- struct btrfsic_state *state,
- struct btrfsic_block_data_ctx *block_ctx,
- const char *additional_string,
- int is_metadata,
- int is_iodone,
- int never_written,
- int mirror_num,
- int *was_created)
-{
- struct btrfsic_block *block;
-
- block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
- block_ctx->dev_bytenr,
- &state->block_hashtable);
- if (NULL == block) {
- struct btrfsic_dev_state *dev_state;
-
- block = btrfsic_block_alloc();
- if (!block)
- return NULL;
-
- dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev->bd_dev);
- if (NULL == dev_state) {
- pr_info("btrfsic: error, lookup dev_state failed!\n");
- btrfsic_block_free(block);
- return NULL;
- }
- block->dev_state = dev_state;
- block->dev_bytenr = block_ctx->dev_bytenr;
- block->logical_bytenr = block_ctx->start;
- block->is_metadata = is_metadata;
- block->is_iodone = is_iodone;
- block->never_written = never_written;
- block->mirror_num = mirror_num;
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- pr_info("New %s%c-block @%llu (%pg/%llu/%d)\n",
- additional_string,
- btrfsic_get_block_type(state, block),
- block->logical_bytenr, dev_state->bdev,
- block->dev_bytenr, mirror_num);
- list_add(&block->all_blocks_node, &state->all_blocks_list);
- btrfsic_block_hashtable_add(block, &state->block_hashtable);
- if (NULL != was_created)
- *was_created = 1;
- } else {
- if (NULL != was_created)
- *was_created = 0;
- }
-
- return block;
-}
-
-static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
- u64 bytenr,
- struct btrfsic_dev_state *dev_state,
- u64 dev_bytenr)
-{
- struct btrfs_fs_info *fs_info = state->fs_info;
- struct btrfsic_block_data_ctx block_ctx;
- int num_copies;
- int mirror_num;
- int match = 0;
- int ret;
-
- num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);
-
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- ret = btrfsic_map_block(state, bytenr, state->metablock_size,
- &block_ctx, mirror_num);
- if (ret) {
- pr_info("btrfsic: btrfsic_map_block(logical @%llu, mirror %d) failed!\n",
- bytenr, mirror_num);
- continue;
- }
-
- if (dev_state->bdev == block_ctx.dev->bdev &&
- dev_bytenr == block_ctx.dev_bytenr) {
- match++;
- btrfsic_release_block_ctx(&block_ctx);
- break;
- }
- btrfsic_release_block_ctx(&block_ctx);
- }
-
- if (WARN_ON(!match)) {
- pr_info(
-"btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio, buffer->log_bytenr=%llu, submit_bio(bdev=%pg, phys_bytenr=%llu)!\n",
- bytenr, dev_state->bdev, dev_bytenr);
- for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
- ret = btrfsic_map_block(state, bytenr,
- state->metablock_size,
- &block_ctx, mirror_num);
- if (ret)
- continue;
-
- pr_info("read logical bytenr @%llu maps to (%pg/%llu/%d)\n",
- bytenr, block_ctx.dev->bdev,
- block_ctx.dev_bytenr, mirror_num);
- }
- }
-}
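
btrfsic_cmp_log_and_dev_bytenr() accepts a write only if at least one mirror of the logical address maps to the device and physical offset the bio was actually submitted to, and dumps every mirror mapping when none matches. A stripped-down model of that scan, with a fake map_block() standing in for btrfsic_map_block():

    #include <stdbool.h>

    struct mapping { int dev; unsigned long long phys; };

    /* Hypothetical stand-in for btrfsic_map_block(): resolve one mirror. */
    static int map_block(unsigned long long logical, int mirror, struct mapping *m)
    {
    	m->dev = mirror;			/* fake data, sketch only */
    	m->phys = logical + (unsigned long long)mirror * 4096;
    	return 0;
    }

    /* Mirrors the loop above: at least one mirror must map to the device
     * and physical offset the bio was actually submitted to. */
    static bool logical_matches_submit(unsigned long long logical, int num_copies,
    				   int submit_dev, unsigned long long submit_phys)
    {
    	for (int mirror = 1; mirror <= num_copies; mirror++) {
    		struct mapping m;

    		if (map_block(logical, mirror, &m))
    			continue;	/* mapping failed, try the next mirror */
    		if (m.dev == submit_dev && m.phys == submit_phys)
    			return true;
    	}
    	return false;			/* caller would WARN and dump all mirrors */
    }
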
-
-static struct btrfsic_dev_state *btrfsic_dev_state_lookup(dev_t dev)
-{
- return btrfsic_dev_state_hashtable_lookup(dev,
- &btrfsic_dev_state_hashtable);
-}
-
-static void btrfsic_check_write_bio(struct bio *bio, struct btrfsic_dev_state *dev_state)
-{
- unsigned int segs = bio_segments(bio);
- u64 dev_bytenr = 512 * bio->bi_iter.bi_sector;
- u64 cur_bytenr = dev_bytenr;
- struct bvec_iter iter;
- struct bio_vec bvec;
- char **mapped_datav;
- int bio_is_patched = 0;
- int i = 0;
-
- if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- pr_info(
-"submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
- bio_op(bio), bio->bi_opf, segs,
- bio->bi_iter.bi_sector, dev_bytenr, bio->bi_bdev);
-
- mapped_datav = kmalloc_array(segs, sizeof(*mapped_datav), GFP_NOFS);
- if (!mapped_datav)
- return;
-
- bio_for_each_segment(bvec, bio, iter) {
- BUG_ON(bvec.bv_len != PAGE_SIZE);
- mapped_datav[i] = page_address(bvec.bv_page);
- i++;
-
- if (dev_state->state->print_mask &
- BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
- pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
- i, cur_bytenr, bvec.bv_len, bvec.bv_offset);
- cur_bytenr += bvec.bv_len;
- }
-
- btrfsic_process_written_block(dev_state, dev_bytenr, mapped_datav, segs,
- bio, &bio_is_patched, bio->bi_opf);
- kfree(mapped_datav);
-}
-
-static void btrfsic_check_flush_bio(struct bio *bio, struct btrfsic_dev_state *dev_state)
-{
- if (dev_state->state->print_mask & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
- pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
- bio_op(bio), bio->bi_opf, bio->bi_bdev);
-
- if (dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
- struct btrfsic_block *const block =
- &dev_state->dummy_block_for_bio_bh_flush;
-
- block->is_iodone = 0;
- block->never_written = 0;
- block->iodone_w_error = 0;
- block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = bio->bi_opf;
- block->orig_bio_private = bio->bi_private;
- block->orig_bio_end_io = bio->bi_end_io;
- block->next_in_same_bio = NULL;
- bio->bi_private = block;
- bio->bi_end_io = btrfsic_bio_end_io;
- } else if ((dev_state->state->print_mask &
- (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
- BTRFSIC_PRINT_MASK_VERBOSE))) {
- pr_info(
-"btrfsic_submit_bio(%pg) with FLUSH but dummy block already in use (ignored)!\n",
- dev_state->bdev);
- }
-}
-
-void btrfsic_check_bio(struct bio *bio)
-{
- struct btrfsic_dev_state *dev_state;
-
- if (!btrfsic_is_initialized)
- return;
-
- /*
- * We can be called before btrfsic_mount, so there might not be a
- * dev_state.
- */
- dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev);
- mutex_lock(&btrfsic_mutex);
- if (dev_state) {
- if (bio_op(bio) == REQ_OP_WRITE && bio_has_data(bio))
- btrfsic_check_write_bio(bio, dev_state);
- else if (bio->bi_opf & REQ_PREFLUSH)
- btrfsic_check_flush_bio(bio, dev_state);
- }
- mutex_unlock(&btrfsic_mutex);
-}
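
Taken together, the write and flush hooks implement a small model of the disk write cache: each tracked write is stamped with last_flush_gen + 1, a completed FLUSH advances last_flush_gen, and the superblock check earlier in this file flags any referenced block whose flush_gen is still ahead of its device. A toy version of that bookkeeping, with invented struct names:

    #include <stdbool.h>

    struct dev_state { unsigned long long last_flush_gen; };
    struct tracked_block { unsigned long long flush_gen; };

    /* Stamp a write as "after the last completed flush". */
    static void note_write(struct dev_state *dev, struct tracked_block *b)
    {
    	b->flush_gen = dev->last_flush_gen + 1;
    }

    /* A completed FLUSH makes everything written before it durable. */
    static void note_flush_done(struct dev_state *dev)
    {
    	dev->last_flush_gen++;
    }

    /* The superblock check earlier in the file is this comparison:
     * flush_gen > last_flush_gen means "still in the disk write cache". */
    static bool durable(const struct dev_state *dev, const struct tracked_block *b)
    {
    	return b->flush_gen <= dev->last_flush_gen;
    }
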
-
-int btrfsic_mount(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices,
- int including_extent_data, u32 print_mask)
-{
- int ret;
- struct btrfsic_state *state;
- struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_device *device;
-
- if (!PAGE_ALIGNED(fs_info->nodesize)) {
- pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
- fs_info->nodesize, PAGE_SIZE);
- return -1;
- }
- if (!PAGE_ALIGNED(fs_info->sectorsize)) {
- pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
- fs_info->sectorsize, PAGE_SIZE);
- return -1;
- }
- state = kvzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- if (!btrfsic_is_initialized) {
- mutex_init(&btrfsic_mutex);
- btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
- btrfsic_is_initialized = 1;
- }
- mutex_lock(&btrfsic_mutex);
- state->fs_info = fs_info;
- state->print_mask = print_mask;
- state->include_extent_data = including_extent_data;
- state->metablock_size = fs_info->nodesize;
- state->datablock_size = fs_info->sectorsize;
- INIT_LIST_HEAD(&state->all_blocks_list);
- btrfsic_block_hashtable_init(&state->block_hashtable);
- btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
- state->max_superblock_generation = 0;
- state->latest_superblock = NULL;
-
- list_for_each_entry(device, dev_head, dev_list) {
- struct btrfsic_dev_state *ds;
-
- if (!device->bdev || !device->name)
- continue;
-
- ds = btrfsic_dev_state_alloc();
- if (NULL == ds) {
- mutex_unlock(&btrfsic_mutex);
- return -ENOMEM;
- }
- ds->bdev = device->bdev;
- ds->state = state;
- btrfsic_dev_state_hashtable_add(ds,
- &btrfsic_dev_state_hashtable);
- }
-
- ret = btrfsic_process_superblock(state, fs_devices);
- if (0 != ret) {
- mutex_unlock(&btrfsic_mutex);
- btrfsic_unmount(fs_devices);
- return ret;
- }
-
- if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
- btrfsic_dump_database(state);
- if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
- btrfsic_dump_tree(state);
-
- mutex_unlock(&btrfsic_mutex);
- return 0;
-}
-
-void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
-{
- struct btrfsic_block *b_all, *tmp_all;
- struct btrfsic_state *state;
- struct list_head *dev_head = &fs_devices->devices;
- struct btrfs_device *device;
-
- if (!btrfsic_is_initialized)
- return;
-
- mutex_lock(&btrfsic_mutex);
-
- state = NULL;
- list_for_each_entry(device, dev_head, dev_list) {
- struct btrfsic_dev_state *ds;
-
- if (!device->bdev || !device->name)
- continue;
-
- ds = btrfsic_dev_state_hashtable_lookup(
- device->bdev->bd_dev,
- &btrfsic_dev_state_hashtable);
- if (NULL != ds) {
- state = ds->state;
- btrfsic_dev_state_hashtable_remove(ds);
- btrfsic_dev_state_free(ds);
- }
- }
-
- if (NULL == state) {
- pr_info("btrfsic: error, cannot find state information on umount!\n");
- mutex_unlock(&btrfsic_mutex);
- return;
- }
-
- /*
- * Don't care about keeping the lists' state up to date,
- * just free all memory that was allocated dynamically.
- * Free the blocks and the block_links.
- */
- list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
- all_blocks_node) {
- struct btrfsic_block_link *l, *tmp;
-
- list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
- node_ref_to) {
- if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
- btrfsic_print_rem_link(state, l);
-
- l->ref_cnt--;
- if (0 == l->ref_cnt)
- btrfsic_block_link_free(l);
- }
-
- if (b_all->is_iodone || b_all->never_written)
- btrfsic_block_free(b_all);
- else
- pr_info(
-"btrfs: attempt to free %c-block @%llu (%pg/%llu/%d) on umount which is not yet iodone!\n",
- btrfsic_get_block_type(state, b_all),
- b_all->logical_bytenr, b_all->dev_state->bdev,
- b_all->dev_bytenr, b_all->mirror_num);
- }
-
- mutex_unlock(&btrfsic_mutex);
-
- kvfree(state);
-}
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
deleted file mode 100644
index e4c8aed7996f..000000000000
--- a/fs/btrfs/check-integrity.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) STRATO AG 2011. All rights reserved.
- */
-
-#ifndef BTRFS_CHECK_INTEGRITY_H
-#define BTRFS_CHECK_INTEGRITY_H
-
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-void btrfsic_check_bio(struct bio *bio);
-#else
-static inline void btrfsic_check_bio(struct bio *bio) { }
-#endif
-
-int btrfsic_mount(struct btrfs_fs_info *fs_info,
- struct btrfs_fs_devices *fs_devices,
- int including_extent_data, u32 print_mask);
-void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
-
-#endif
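
The deleted header followed the usual kernel pattern for optional features: a real prototype when the config option is set and an empty static inline otherwise, so call sites compile unchanged (and to nothing) when the feature is off. The same shape in a generic, hypothetical header:

    /* feature.h - sketch of the compile-out stub pattern, names are invented */
    #ifndef FEATURE_H
    #define FEATURE_H

    struct bio;

    #ifdef CONFIG_MY_FEATURE
    void my_feature_check_bio(struct bio *bio);	/* real symbol in feature.c */
    #else
    static inline void my_feature_check_bio(struct bio *bio) { }
    #endif

    #endif /* FEATURE_H */
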
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 8818ed5c390f..19b22b4653c8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -193,12 +193,12 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
unsigned long index = cb->start >> PAGE_SHIFT;
unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
struct folio_batch fbatch;
- const int errno = blk_status_to_errno(cb->bbio.bio.bi_status);
+ const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
int i;
int ret;
- if (errno)
- mapping_set_error(inode->i_mapping, errno);
+ if (error)
+ mapping_set_error(inode->i_mapping, error);
folio_batch_init(&fbatch);
while (index <= end_index) {
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index da519c1b6ad0..2a9344a3fcee 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -230,9 +230,9 @@ noinline void btrfs_release_path(struct btrfs_path *p)
* cause could be a bug, eg. due to ENOSPC, and not for common errors that are
* caused by external factors.
*/
-bool __cold abort_should_print_stack(int errno)
+bool __cold abort_should_print_stack(int error)
{
- switch (errno) {
+ switch (error) {
case -EIO:
case -EROFS:
case -ENOMEM:
@@ -316,6 +316,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
int ret = 0;
int level;
struct btrfs_disk_key disk_key;
+ u64 reloc_src_root = 0;
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
@@ -328,9 +329,11 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
+ if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
+ reloc_src_root = btrfs_header_owner(buf);
cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
&disk_key, level, buf->start, 0,
- BTRFS_NESTING_NEW_ROOT);
+ reloc_src_root, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -359,7 +362,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
return ret;
}
- btrfs_mark_buffer_dirty(cow);
+ btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
}
@@ -367,7 +370,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
/*
* check if the tree block can be shared by multiple trees
*/
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
@@ -376,11 +380,21 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
* not allocated by tree relocation, we know the block is not shared.
*/
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- buf != root->node && buf != root->commit_root &&
+ buf != root->node &&
(btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item) ||
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
- return 1;
+ btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
+ if (buf != root->commit_root)
+ return 1;
+ /*
+ * An extent buffer that used to be the commit root may still be
+ * shared because the tree height may have increased and it
+ * became a child of a higher level root. This can happen when
+ * snapshotting a subvolume created in the current transaction.
+ */
+ if (btrfs_header_generation(buf) == trans->transid)
+ return 1;
+ }
return 0;
}
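
The reworked predicate keeps the old rule (shareable root, block not the root node, generation at or before the last snapshot or the RELOC flag set) but stops exempting the commit root unconditionally: per the new comment, a commit root created in the current transaction may still be shared. A simplified model of the combined conditions, with invented field names rather than the real structures:

    #include <stdbool.h>

    struct model {
    	bool root_shareable;
    	bool is_root_node;	/* buf == root->node */
    	bool is_commit_root;	/* buf == root->commit_root */
    	bool reloc_flag;
    	unsigned long long buf_gen, last_snapshot, transid;
    };

    static bool can_be_shared(const struct model *m)
    {
    	if (!m->root_shareable || m->is_root_node)
    		return false;
    	if (m->buf_gen > m->last_snapshot && !m->reloc_flag)
    		return false;
    	if (!m->is_commit_root)
    		return true;
    	/* commit root: only shared if it was created in this transaction */
    	return m->buf_gen == m->transid;
    }
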
@@ -415,7 +429,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* are only allowed for blocks use full backrefs.
*/
- if (btrfs_block_can_be_shared(root, buf)) {
+ if (btrfs_block_can_be_shared(trans, root, buf)) {
ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
&refs, &flags);
@@ -507,13 +521,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* bytes the allocator should try to find free next to the block it returns.
* This is just a hint and may be ignored by the allocator.
*/
-static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct extent_buffer *buf,
- struct extent_buffer *parent, int parent_slot,
- struct extent_buffer **cow_ret,
- u64 search_start, u64 empty_size,
- enum btrfs_lock_nesting nest)
+int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf,
+ struct extent_buffer *parent, int parent_slot,
+ struct extent_buffer **cow_ret,
+ u64 search_start, u64 empty_size,
+ enum btrfs_lock_nesting nest)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_disk_key disk_key;
@@ -522,6 +536,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
int last_ref = 0;
int unlock_orig = 0;
u64 parent_start = 0;
+ u64 reloc_src_root = 0;
if (*cow_ret == buf)
unlock_orig = 1;
@@ -540,12 +555,14 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
- if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
- parent_start = parent->start;
-
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (parent)
+ parent_start = parent->start;
+ reloc_src_root = btrfs_header_owner(buf);
+ }
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
- search_start, empty_size, nest);
+ search_start, empty_size, reloc_src_root, nest);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -616,7 +633,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
cow->start);
btrfs_set_node_ptr_generation(parent, parent_slot,
trans->transid);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
if (last_ref) {
ret = btrfs_tree_mod_log_free_eb(buf);
if (ret) {
@@ -632,7 +649,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
- btrfs_mark_buffer_dirty(cow);
+ btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
}
@@ -668,11 +685,11 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
}
/*
- * cows a single block, see __btrfs_cow_block for the real work.
+ * COWs a single block, see btrfs_force_cow_block() for the real work.
* This version of it has extra checks so that a block isn't COWed more than
* once per transaction, as long as it hasn't been written yet
*/
-noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
@@ -712,7 +729,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
return 0;
}
- search_start = buf->start & ~((u64)SZ_1G - 1);
+ search_start = round_down(buf->start, SZ_1G);
/*
* Before CoWing this block for later modification, check if it's
@@ -721,8 +738,8 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
* Also We don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
- ret = __btrfs_cow_block(trans, root, buf, parent,
- parent_slot, cow_ret, search_start, 0, nest);
+ ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
+ cow_ret, search_start, 0, nest);
trace_btrfs_cow_block(root, buf, *cow_ret);
@@ -731,49 +748,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
/*
- * helper function for defrag to decide if two blocks pointed to by a
- * node are actually close by
- */
-static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
-{
- if (blocknr < other && other - (blocknr + blocksize) < 32768)
- return 1;
- if (blocknr > other && blocknr - (other + blocksize) < 32768)
- return 1;
- return 0;
-}
-
-#ifdef __LITTLE_ENDIAN
-
-/*
- * Compare two keys, on little-endian the disk order is same as CPU order and
- * we can avoid the conversion.
- */
-static int comp_keys(const struct btrfs_disk_key *disk_key,
- const struct btrfs_key *k2)
-{
- const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
-
- return btrfs_comp_cpu_keys(k1, k2);
-}
-
-#else
-
-/*
- * compare two keys in a memcmp fashion
- */
-static int comp_keys(const struct btrfs_disk_key *disk,
- const struct btrfs_key *k2)
-{
- struct btrfs_key k1;
-
- btrfs_disk_key_to_cpu(&k1, disk);
-
- return btrfs_comp_cpu_keys(&k1, k2);
-}
-#endif
-
-/*
* same as comp_keys only with two btrfs_key's
*/
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
@@ -794,105 +768,6 @@ int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_ke
}
/*
- * this is used by the defrag code to go through all the
- * leaves pointed to by a node and reallocate them so that
- * disk order is close to key order
- */
-int btrfs_realloc_node(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, u64 *last_ret,
- struct btrfs_key *progress)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct extent_buffer *cur;
- u64 blocknr;
- u64 search_start = *last_ret;
- u64 last_block = 0;
- u64 other;
- u32 parent_nritems;
- int end_slot;
- int i;
- int err = 0;
- u32 blocksize;
- int progress_passed = 0;
- struct btrfs_disk_key disk_key;
-
- /*
- * COWing must happen through a running transaction, which always
- * matches the current fs generation (it's a transaction with a state
- * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
- * into error state to prevent the commit of any transaction.
- */
- if (unlikely(trans->transaction != fs_info->running_transaction ||
- trans->transid != fs_info->generation)) {
- btrfs_abort_transaction(trans, -EUCLEAN);
- btrfs_crit(fs_info,
-"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
- parent->start, btrfs_root_id(root), trans->transid,
- fs_info->running_transaction->transid,
- fs_info->generation);
- return -EUCLEAN;
- }
-
- parent_nritems = btrfs_header_nritems(parent);
- blocksize = fs_info->nodesize;
- end_slot = parent_nritems - 1;
-
- if (parent_nritems <= 1)
- return 0;
-
- for (i = start_slot; i <= end_slot; i++) {
- int close = 1;
-
- btrfs_node_key(parent, &disk_key, i);
- if (!progress_passed && comp_keys(&disk_key, progress) < 0)
- continue;
-
- progress_passed = 1;
- blocknr = btrfs_node_blockptr(parent, i);
- if (last_block == 0)
- last_block = blocknr;
-
- if (i > 0) {
- other = btrfs_node_blockptr(parent, i - 1);
- close = close_blocks(blocknr, other, blocksize);
- }
- if (!close && i < end_slot) {
- other = btrfs_node_blockptr(parent, i + 1);
- close = close_blocks(blocknr, other, blocksize);
- }
- if (close) {
- last_block = blocknr;
- continue;
- }
-
- cur = btrfs_read_node_slot(parent, i);
- if (IS_ERR(cur))
- return PTR_ERR(cur);
- if (search_start == 0)
- search_start = last_block;
-
- btrfs_tree_lock(cur);
- err = __btrfs_cow_block(trans, root, cur, parent, i,
- &cur, search_start,
- min(16 * blocksize,
- (end_slot - i) * blocksize),
- BTRFS_NESTING_COW);
- if (err) {
- btrfs_tree_unlock(cur);
- free_extent_buffer(cur);
- break;
- }
- search_start = cur->start;
- last_block = cur->start;
- *last_ret = search_start;
- btrfs_tree_unlock(cur);
- free_extent_buffer(cur);
- }
- return err;
-}
-
-/*
* Search for a key in the given extent_buffer.
*
* The lower boundary for the search is specified by the slot number @first_slot.
@@ -958,7 +833,7 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
tmp = &unaligned;
}
- ret = comp_keys(tmp, key);
+ ret = btrfs_comp_keys(tmp, key);
if (ret < 0)
low = mid + 1;
@@ -973,19 +848,19 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
return 1;
}
-static void root_add_used(struct btrfs_root *root, u32 size)
+static void root_add_used_bytes(struct btrfs_root *root)
{
spin_lock(&root->accounting_lock);
btrfs_set_root_used(&root->root_item,
- btrfs_root_used(&root->root_item) + size);
+ btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
spin_unlock(&root->accounting_lock);
}
-static void root_sub_used(struct btrfs_root *root, u32 size)
+static void root_sub_used_bytes(struct btrfs_root *root)
{
spin_lock(&root->accounting_lock);
btrfs_set_root_used(&root->root_item,
- btrfs_root_used(&root->root_item) - size);
+ btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
spin_unlock(&root->accounting_lock);
}
@@ -1101,7 +976,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* once for the path */
free_extent_buffer(mid);
- root_sub_used(root, mid->len);
+ root_sub_used_bytes(root);
btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
/* once for the root ptr */
free_extent_buffer_stale(mid);
@@ -1171,7 +1046,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
right = NULL;
goto out;
}
- root_sub_used(root, right->len);
+ root_sub_used_bytes(root);
btrfs_free_tree_block(trans, btrfs_root_id(root), right,
0, 1);
free_extent_buffer_stale(right);
@@ -1186,7 +1061,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_set_node_key(parent, &right_key, pslot + 1);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
}
}
if (btrfs_header_nritems(mid) == 1) {
@@ -1229,7 +1104,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
mid = NULL;
goto out;
}
- root_sub_used(root, mid->len);
+ root_sub_used_bytes(root);
btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
@@ -1244,7 +1119,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_set_node_key(parent, &mid_key, pslot);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
}
/* update the path */
@@ -1351,7 +1226,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
return ret;
}
btrfs_set_node_key(parent, &disk_key, pslot);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
if (btrfs_header_nritems(left) > orig_slot) {
path->nodes[level] = left;
path->slots[level + 1] -= 1;
@@ -1411,7 +1286,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
return ret;
}
btrfs_set_node_key(parent, &disk_key, pslot + 1);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
if (btrfs_header_nritems(mid) <= orig_slot) {
path->nodes[level] = right;
@@ -1995,7 +1870,7 @@ static int search_leaf(struct btrfs_trans_handle *trans,
* the extent buffer's header and we have recently accessed
* the header's level field.
*/
- ret = comp_keys(&first_key, key);
+ ret = btrfs_comp_keys(&first_key, key);
if (ret < 0) {
/*
* The first key is smaller than the key we want
@@ -2080,8 +1955,8 @@ static int search_leaf(struct btrfs_trans_handle *trans,
}
/*
- * btrfs_search_slot - look for a key in a tree and perform necessary
- * modifications to preserve tree invariants.
+ * Look for a key in a tree and perform necessary modifications to preserve
+ * tree invariants.
*
* @trans: Handle of transaction, used when modifying the tree
* @p: Holds all btree nodes along the search path
@@ -2504,7 +2379,7 @@ static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
*/
if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
- ret = comp_keys(&found_key, &orig_key);
+ ret = btrfs_comp_keys(&found_key, &orig_key);
if (ret == 0) {
if (path->slots[0] > 0) {
path->slots[0]--;
@@ -2519,7 +2394,7 @@ static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
}
btrfs_item_key(path->nodes[0], &found_key, 0);
- ret = comp_keys(&found_key, &key);
+ ret = btrfs_comp_keys(&found_key, &key);
/*
* We might have had an item with the previous key in the tree right
* before we released our path. And after we released our path, that
@@ -2667,7 +2542,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
* higher levels
*
*/
-static void fixup_low_keys(struct btrfs_path *path,
+static void fixup_low_keys(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
struct btrfs_disk_key *key, int level)
{
int i;
@@ -2684,7 +2560,7 @@ static void fixup_low_keys(struct btrfs_path *path,
BTRFS_MOD_LOG_KEY_REPLACE);
BUG_ON(ret < 0);
btrfs_set_node_key(t, key, tslot);
- btrfs_mark_buffer_dirty(path->nodes[i]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[i]);
if (tslot != 0)
break;
}
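
fixup_low_keys() walks up the path replacing the parent key that points at the changed child, and can stop at the first level where the fixed slot is not 0, since only a node's leftmost key is mirrored in its parent. The shape of that walk as a standalone sketch with made-up path/node types:

    struct key { unsigned long long objectid, offset; unsigned char type; };
    struct node { struct key keys[64]; };
    struct path { struct node *nodes[8]; int slots[8]; };

    /* Propagate a changed minimal key upwards, starting at @level; once a
     * fixed slot is not slot 0, higher nodes do not see the key change. */
    static void fixup_low_keys_sketch(struct path *p, const struct key *key,
    				  int level)
    {
    	for (int i = level; i < 8; i++) {
    		struct node *parent = p->nodes[i];
    		int slot = p->slots[i];

    		if (!parent)
    			break;
    		parent->keys[slot] = *key;	/* like btrfs_set_node_key() */
    		if (slot != 0)
    			break;
    	}
    }
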
@@ -2696,10 +2572,11 @@ static void fixup_low_keys(struct btrfs_path *path,
* This function isn't completely safe. It's the caller's responsibility
* that the new key won't break the order
*/
-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const struct btrfs_key *new_key)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_disk_key disk_key;
struct extent_buffer *eb;
int slot;
@@ -2708,7 +2585,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
slot = path->slots[0];
if (slot > 0) {
btrfs_item_key(eb, &disk_key, slot - 1);
- if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
+ if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) {
btrfs_print_leaf(eb);
btrfs_crit(fs_info,
"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
@@ -2722,7 +2599,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
}
if (slot < btrfs_header_nritems(eb) - 1) {
btrfs_item_key(eb, &disk_key, slot + 1);
- if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
+ if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) {
btrfs_print_leaf(eb);
btrfs_crit(fs_info,
"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
@@ -2737,9 +2614,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
btrfs_cpu_key_to_disk(&disk_key, new_key);
btrfs_set_item_key(eb, &disk_key, slot);
- btrfs_mark_buffer_dirty(eb);
+ btrfs_mark_buffer_dirty(trans, eb);
if (slot == 0)
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
/*
@@ -2870,8 +2747,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
}
btrfs_set_header_nritems(src, src_nritems - push_items);
btrfs_set_header_nritems(dst, dst_nritems + push_items);
- btrfs_mark_buffer_dirty(src);
- btrfs_mark_buffer_dirty(dst);
+ btrfs_mark_buffer_dirty(trans, src);
+ btrfs_mark_buffer_dirty(trans, dst);
return ret;
}
@@ -2946,8 +2823,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(src, src_nritems - push_items);
btrfs_set_header_nritems(dst, dst_nritems + push_items);
- btrfs_mark_buffer_dirty(src);
- btrfs_mark_buffer_dirty(dst);
+ btrfs_mark_buffer_dirty(trans, src);
+ btrfs_mark_buffer_dirty(trans, dst);
return ret;
}
@@ -2963,7 +2840,6 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
u64 lower_gen;
struct extent_buffer *lower;
struct extent_buffer *c;
@@ -2982,11 +2858,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&lower_key, level, root->node->start, 0,
- BTRFS_NESTING_NEW_ROOT);
+ 0, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(c))
return PTR_ERR(c);
- root_add_used(root, fs_info->nodesize);
+ root_add_used_bytes(root);
btrfs_set_header_nritems(c, 1);
btrfs_set_node_key(c, &lower_key, 0);
@@ -2996,7 +2872,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_set_node_ptr_generation(c, 0, lower_gen);
- btrfs_mark_buffer_dirty(c);
+ btrfs_mark_buffer_dirty(trans, c);
old = root->node;
ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
@@ -3068,7 +2944,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
WARN_ON(trans->transid == 0);
btrfs_set_node_ptr_generation(lower, slot, trans->transid);
btrfs_set_header_nritems(lower, nritems + 1);
- btrfs_mark_buffer_dirty(lower);
+ btrfs_mark_buffer_dirty(trans, lower);
return 0;
}
@@ -3126,11 +3002,11 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&disk_key, level, c->start, 0,
- BTRFS_NESTING_SPLIT);
+ 0, BTRFS_NESTING_SPLIT);
if (IS_ERR(split))
return PTR_ERR(split);
- root_add_used(root, fs_info->nodesize);
+ root_add_used_bytes(root);
ASSERT(btrfs_header_level(c) == level);
ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
@@ -3147,8 +3023,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(split, c_nritems - mid);
btrfs_set_header_nritems(c, mid);
- btrfs_mark_buffer_dirty(c);
- btrfs_mark_buffer_dirty(split);
+ btrfs_mark_buffer_dirty(trans, c);
+ btrfs_mark_buffer_dirty(trans, split);
ret = insert_ptr(trans, path, &disk_key, split->start,
path->slots[level + 1] + 1, level + 1);
@@ -3314,15 +3190,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(left, left_nritems);
if (left_nritems)
- btrfs_mark_buffer_dirty(left);
+ btrfs_mark_buffer_dirty(trans, left);
else
btrfs_clear_buffer_dirty(trans, left);
- btrfs_mark_buffer_dirty(right);
+ btrfs_mark_buffer_dirty(trans, right);
btrfs_item_key(right, &disk_key, 0);
btrfs_set_node_key(upper, &disk_key, slot + 1);
- btrfs_mark_buffer_dirty(upper);
+ btrfs_mark_buffer_dirty(trans, upper);
/* then fixup the leaf pointer in the path */
if (path->slots[0] >= left_nritems) {
@@ -3534,14 +3410,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
btrfs_set_token_item_offset(&token, i, push_space);
}
- btrfs_mark_buffer_dirty(left);
+ btrfs_mark_buffer_dirty(trans, left);
if (right_nritems)
- btrfs_mark_buffer_dirty(right);
+ btrfs_mark_buffer_dirty(trans, right);
else
btrfs_clear_buffer_dirty(trans, right);
btrfs_item_key(right, &disk_key, 0);
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@@ -3672,8 +3548,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
if (ret < 0)
return ret;
- btrfs_mark_buffer_dirty(right);
- btrfs_mark_buffer_dirty(l);
+ btrfs_mark_buffer_dirty(trans, right);
+ btrfs_mark_buffer_dirty(trans, l);
BUG_ON(path->slots[0] != slot);
if (mid <= slot) {
@@ -3877,13 +3753,13 @@ again:
* use BTRFS_NESTING_NEW_ROOT.
*/
right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
- &disk_key, 0, l->start, 0,
+ &disk_key, 0, l->start, 0, 0,
num_doubles ? BTRFS_NESTING_NEW_ROOT :
BTRFS_NESTING_SPLIT);
if (IS_ERR(right))
return PTR_ERR(right);
- root_add_used(root, fs_info->nodesize);
+ root_add_used_bytes(root);
if (split == 0) {
if (mid <= slot) {
@@ -3914,7 +3790,7 @@ again:
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
/*
* We create a new leaf 'right' for the required ins_len and
@@ -4013,7 +3889,8 @@ err:
return ret;
}
-static noinline int split_item(struct btrfs_path *path,
+static noinline int split_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
const struct btrfs_key *new_key,
unsigned long split_offset)
{
@@ -4072,7 +3949,7 @@ static noinline int split_item(struct btrfs_path *path,
write_extent_buffer(leaf, buf + split_offset,
btrfs_item_ptr_offset(leaf, slot),
item_size - split_offset);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
BUG_ON(btrfs_leaf_free_space(leaf) < 0);
kfree(buf);
@@ -4106,7 +3983,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- ret = split_item(path, new_key, split_offset);
+ ret = split_item(trans, path, new_key, split_offset);
return ret;
}
@@ -4116,7 +3993,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
@@ -4192,11 +4070,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
btrfs_set_item_size(leaf, slot, new_size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (btrfs_leaf_free_space(leaf) < 0) {
btrfs_print_leaf(leaf);
@@ -4207,7 +4085,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
/*
* make the item pointed to by the path bigger, data_size is the added size.
*/
-void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u32 data_size)
{
int slot;
struct extent_buffer *leaf;
@@ -4257,7 +4136,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
data_end = old_data;
old_size = btrfs_item_size(leaf, slot);
btrfs_set_item_size(leaf, slot, old_size + data_size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (btrfs_leaf_free_space(leaf) < 0) {
btrfs_print_leaf(leaf);
@@ -4268,6 +4147,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
/*
* Make space in the node before inserting one or more items.
*
+ * @trans: transaction handle
* @root: root we are inserting items to
* @path: points to the leaf/slot where we are going to insert new items
* @batch: information about the batch of items to insert
@@ -4275,7 +4155,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
* Main purpose is to save stack depth by doing the bulk of the work in a
* function that doesn't call btrfs_search_slot
*/
-static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
+static void setup_items_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, struct btrfs_path *path,
const struct btrfs_item_batch *batch)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4295,7 +4176,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
*/
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@@ -4354,7 +4235,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
}
btrfs_set_header_nritems(leaf, nritems + batch->nr);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (btrfs_leaf_free_space(leaf) < 0) {
btrfs_print_leaf(leaf);
@@ -4365,12 +4246,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
/*
* Insert a new item into a leaf.
*
+ * @trans: Transaction handle.
* @root: The root of the btree.
* @path: A path pointing to the target leaf and slot.
* @key: The key of the new item.
* @data_size: The size of the data associated with the new key.
*/
-void btrfs_setup_item_for_insert(struct btrfs_root *root,
+void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *key,
u32 data_size)
@@ -4382,7 +4265,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
batch.total_data_size = data_size;
batch.nr = 1;
- setup_items_for_insert(root, path, &batch);
+ setup_items_for_insert(trans, root, path, &batch);
}
/*
@@ -4408,7 +4291,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
slot = path->slots[0];
BUG_ON(slot < 0);
- setup_items_for_insert(root, path, batch);
+ setup_items_for_insert(trans, root, path, batch);
return 0;
}
@@ -4433,7 +4316,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
leaf = path->nodes[0];
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, data, ptr, data_size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
btrfs_free_path(path);
return ret;
@@ -4464,7 +4347,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
return ret;
path->slots[0]++;
- btrfs_setup_item_for_insert(root, path, new_key, item_size);
+ btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
leaf = path->nodes[0];
memcpy_extent_buffer(leaf,
btrfs_item_ptr_offset(leaf, path->slots[0]),
@@ -4522,9 +4405,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
- fixup_low_keys(path, &disk_key, level + 1);
+ fixup_low_keys(trans, path, &disk_key, level + 1);
}
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
return 0;
}
@@ -4556,7 +4439,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
*/
btrfs_unlock_up_safe(path, 0);
- root_sub_used(root, leaf->len);
+ root_sub_used_bytes(root);
atomic_inc(&leaf->refs);
btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
@@ -4621,7 +4504,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
- fixup_low_keys(path, &disk_key, 1);
+ fixup_low_keys(trans, path, &disk_key, 1);
}
/*
@@ -4686,11 +4569,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* dirtied this buffer
*/
if (path->nodes[0] == leaf)
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
free_extent_buffer(leaf);
}
} else {
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
}
return ret;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9419f4e37a58..196c005c31f6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -6,37 +6,10 @@
#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H
-#include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/highmem.h>
-#include <linux/fs.h>
-#include <linux/rwsem.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/backing-dev.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <trace/events/btrfs.h>
-#include <asm/unaligned.h>
#include <linux/pagemap.h>
-#include <linux/btrfs.h>
-#include <linux/btrfs_tree.h>
-#include <linux/workqueue.h>
-#include <linux/security.h>
-#include <linux/sizes.h>
-#include <linux/dynamic_debug.h>
-#include <linux/refcount.h>
-#include <linux/crc32c.h>
-#include <linux/iomap.h>
-#include <linux/fscrypt.h>
-#include "extent-io-tree.h"
-#include "extent_io.h"
-#include "extent_map.h"
-#include "async-thread.h"
-#include "block-rsv.h"
#include "locking.h"
-#include "misc.h"
#include "fs.h"
+#include "accessors.h"
struct btrfs_trans_handle;
struct btrfs_transaction;
@@ -218,10 +191,22 @@ struct btrfs_root {
atomic_t log_commit[2];
/* Used only for log trees of subvolumes, not for the log root tree */
atomic_t log_batch;
+ /*
+ * Protected by the 'log_mutex' lock but can be read without holding
+ * that lock to avoid unnecessary lock contention, in which case it
+ * should be read using btrfs_get_root_log_transid() except if it's a
+ * log tree in which case it can be directly accessed. Updates to this
+ * field should always use btrfs_set_root_log_transid(), except for log
+ * trees where the field can be updated directly.
+ */
int log_transid;
/* No matter the commit succeeds or not*/
int log_transid_committed;
- /* Just be updated when the commit succeeds. */
+ /*
+	 * Only updated when the commit succeeds. Use
+ * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
+ * to access this field.
+ */
int last_log_commit;
pid_t log_start_pid;
@@ -326,6 +311,9 @@ struct btrfs_root {
/* Used only by log trees, when logging csum items */
struct extent_io_tree log_csum_range;
+ /* Used in simple quotas, track root during relocation. */
+ u64 relocation_src_root;
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
u64 alloc_bytenr;
#endif
@@ -352,6 +340,26 @@ static inline u64 btrfs_root_id(const struct btrfs_root *root)
return root->root_key.objectid;
}
+static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
+{
+ return READ_ONCE(root->log_transid);
+}
+
+static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
+{
+ WRITE_ONCE(root->log_transid, log_transid);
+}
+
+static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
+{
+ return READ_ONCE(root->last_log_commit);
+}
+
+static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
+{
+ WRITE_ONCE(root->last_log_commit, commit_id);
+}
+
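
The new accessors wrap the fields in READ_ONCE()/WRITE_ONCE() so lockless readers get a single untorn load that the compiler cannot fuse or re-read. The closest portable user-space analogue is a relaxed C11 atomic, as in this hedged sketch:

    #include <stdatomic.h>

    struct root_sketch {
    	_Atomic int log_transid;	/* written under a mutex, read locklessly */
    };

    /* Relaxed atomics give the READ_ONCE()/WRITE_ONCE() guarantee that
     * matters here: one untorn access, not reordered away or replayed. */
    static int get_log_transid(struct root_sketch *r)
    {
    	return atomic_load_explicit(&r->log_transid, memory_order_relaxed);
    }

    static void set_log_transid(struct root_sketch *r, int v)
    {
    	atomic_store_explicit(&r->log_transid, v, memory_order_relaxed);
    }
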
/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
@@ -470,30 +478,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
((bytes) >> (fs_info)->sectorsize_bits)
-static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
-{
- return crc32c(crc, address, length);
-}
-
-static inline void btrfs_crc32c_final(u32 crc, u8 *result)
-{
- put_unaligned_le32(~crc, result);
-}
-
-static inline u64 btrfs_name_hash(const char *name, int len)
-{
- return crc32c((u32)~1, name, len);
-}
-
-/*
- * Figure the key offset of an extended inode ref
- */
-static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
- int len)
-{
- return (u64) crc32c(parent_objectid, name, len);
-}
-
static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
return mapping_gfp_constraint(mapping, ~__GFP_FS);
@@ -513,12 +497,42 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot);
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
+
+#ifdef __LITTLE_ENDIAN
+
+/*
+ * Compare two keys, on little-endian the disk order is the same as CPU order
+ * and we can avoid the conversion.
+ * we can avoid the conversion.
+ */
+static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
+ const struct btrfs_key *k2)
+{
+ const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
+
+ return btrfs_comp_cpu_keys(k1, k2);
+}
+
+#else
+
+/* Compare two keys in a memcmp fashion. */
+static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
+ const struct btrfs_key *k2)
+{
+ struct btrfs_key k1;
+
+ btrfs_disk_key_to_cpu(&k1, disk);
+
+ return btrfs_comp_cpu_keys(&k1, k2);
+}
+
+#endif
+
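
The little-endian fast path is only sound if the disk key and the CPU key share one member layout, so the cast is a no-op on an LE host. A self-contained illustration of that assumption with simplified stand-in structs (not the real btrfs definitions), including the layout check the trick depends on:

    #include <stdint.h>

    struct disk_key { uint64_t objectid; uint8_t type; uint64_t offset; }
    	__attribute__((packed));
    struct cpu_key  { uint64_t objectid; uint8_t type; uint64_t offset; }
    	__attribute__((packed));

    _Static_assert(sizeof(struct disk_key) == sizeof(struct cpu_key),
    	       "the cast below requires identical layouts");

    /* Valid only on little-endian hosts, where disk order == CPU order. */
    static int comp_keys_le(const struct disk_key *dk, const struct cpu_key *k2)
    {
    	const struct cpu_key *k1 = (const struct cpu_key *)dk;

    	if (k1->objectid != k2->objectid)
    		return k1->objectid < k2->objectid ? -1 : 1;
    	if (k1->type != k2->type)
    		return k1->type < k2->type ? -1 : 1;
    	if (k1->offset != k2->offset)
    		return k1->offset < k2->offset ? -1 : 1;
    	return 0;
    }
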
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid);
-void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
@@ -536,16 +550,26 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
enum btrfs_lock_nesting nest);
+int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf,
+ struct extent_buffer *parent, int parent_slot,
+ struct extent_buffer **cow_ret,
+ u64 search_start, u64 empty_size,
+ enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer **cow_ret, u64 new_root_objectid);
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
-void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
-void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
+void btrfs_extend_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u32 data_size);
+void btrfs_truncate_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -566,10 +590,6 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_path *p, int find_higher,
int return_any);
-int btrfs_realloc_node(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, u64 *last_ret,
- struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
@@ -609,7 +629,8 @@ struct btrfs_item_batch {
int nr;
};
-void btrfs_setup_item_for_insert(struct btrfs_root *root,
+void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *key,
u32 data_size);
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index f2ff4cbe8656..5244561e2016 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -338,13 +338,118 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
}
/*
+ * Check if two block addresses are close, used by defrag.
+ */
+static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
+{
+ if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
+ return true;
+ if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
+ return true;
+ return false;
+}
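
Worked example for the 32KiB threshold above: with a 16KiB nodesize, blocks at 1048576 and 1081344 leave a gap of exactly 16384 bytes and count as close, while a gap of 40016 bytes does not. A direct user-space copy of the predicate demonstrating both cases:

    #include <stdbool.h>
    #include <stdio.h>

    #define SZ_32K 0x8000u

    static bool close_blocks(unsigned long long blocknr, unsigned long long other,
    			 unsigned int blocksize)
    {
    	if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
    		return true;
    	if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
    		return true;
    	return false;
    }

    int main(void)
    {
    	/* 16KiB nodes: a 16384-byte gap is close, a 40016-byte gap is not */
    	printf("%d\n", close_blocks(1048576ULL, 1081344ULL, 16384)); /* 1 */
    	printf("%d\n", close_blocks(1048576ULL, 1104976ULL, 16384)); /* 0 */
    	return 0;
    }
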
+
+/*
+ * Go through all the leaves pointed to by a node and reallocate them so that
+ * disk order is close to key order.
+ */
+static int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *parent,
+ int start_slot, u64 *last_ret,
+ struct btrfs_key *progress)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ const u32 blocksize = fs_info->nodesize;
+ const int end_slot = btrfs_header_nritems(parent) - 1;
+ u64 search_start = *last_ret;
+ u64 last_block = 0;
+ int ret = 0;
+ bool progress_passed = false;
+
+ /*
+ * COWing must happen through a running transaction, which always
+ * matches the current fs generation (it's a transaction with a state
+ * less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
+ * into error state to prevent the commit of any transaction.
+ */
+ if (unlikely(trans->transaction != fs_info->running_transaction ||
+ trans->transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
+ parent->start, btrfs_root_id(root), trans->transid,
+ fs_info->running_transaction->transid,
+ fs_info->generation);
+ return -EUCLEAN;
+ }
+
+ if (btrfs_header_nritems(parent) <= 1)
+ return 0;
+
+ for (int i = start_slot; i <= end_slot; i++) {
+ struct extent_buffer *cur;
+ struct btrfs_disk_key disk_key;
+ u64 blocknr;
+ u64 other;
+ bool close = true;
+
+ btrfs_node_key(parent, &disk_key, i);
+ if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)
+ continue;
+
+ progress_passed = true;
+ blocknr = btrfs_node_blockptr(parent, i);
+ if (last_block == 0)
+ last_block = blocknr;
+
+ if (i > 0) {
+ other = btrfs_node_blockptr(parent, i - 1);
+ close = close_blocks(blocknr, other, blocksize);
+ }
+ if (!close && i < end_slot) {
+ other = btrfs_node_blockptr(parent, i + 1);
+ close = close_blocks(blocknr, other, blocksize);
+ }
+ if (close) {
+ last_block = blocknr;
+ continue;
+ }
+
+ cur = btrfs_read_node_slot(parent, i);
+ if (IS_ERR(cur))
+ return PTR_ERR(cur);
+ if (search_start == 0)
+ search_start = last_block;
+
+ btrfs_tree_lock(cur);
+ ret = btrfs_force_cow_block(trans, root, cur, parent, i,
+ &cur, search_start,
+ min(16 * blocksize,
+ (end_slot - i) * blocksize),
+ BTRFS_NESTING_COW);
+ if (ret) {
+ btrfs_tree_unlock(cur);
+ free_extent_buffer(cur);
+ break;
+ }
+ search_start = cur->start;
+ last_block = cur->start;
+ *last_ret = search_start;
+ btrfs_tree_unlock(cur);
+ free_extent_buffer(cur);
+ }
+ return ret;
+}
+
+/*
* Defrag all the leaves in a given btree.
* Read all the leaves and try to get key order to
* better reflect disk order
*/
-int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
{
struct btrfs_path *path = NULL;
struct btrfs_key key;
@@ -461,6 +566,45 @@ done:
}
/*
+ * Defrag a given btree. Every leaf in the btree is read and defragmented.
+ */
+int btrfs_defrag_root(struct btrfs_root *root)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
+
+ if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
+ return 0;
+
+ while (1) {
+ struct btrfs_trans_handle *trans;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+
+ ret = btrfs_defrag_leaves(trans, root);
+
+ btrfs_end_transaction(trans);
+ btrfs_btree_balance_dirty(fs_info);
+ cond_resched();
+
+ if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)
+ break;
+
+ if (btrfs_defrag_cancelled(fs_info)) {
+ btrfs_debug(fs_info, "defrag_root cancelled");
+ ret = -EAGAIN;
+ break;
+ }
+ }
+ clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
+ return ret;
+}
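
btrfs_defrag_leaves() returns -EAGAIN while leaves remain, so the loop above deliberately runs each pass in its own short transaction rather than pinning one transaction open for the whole defrag. A toy model of just that control flow (defrag_one_pass() is a made-up stand-in for the per-transaction pass):

#include <errno.h>
#include <stdio.h>

/* Made-up stand-in for one btrfs_defrag_leaves() pass inside its own
 * transaction: returns -EAGAIN while work remains, 0 when done. */
static int defrag_one_pass(int *leaves_left)
{
	*leaves_left -= 1;
	return (*leaves_left > 0) ? -EAGAIN : 0;
}

int main(void)
{
	int leaves_left = 3;
	int ret;

	while (1) {
		/* start transaction ... */
		ret = defrag_one_pass(&leaves_left);
		/* ... end transaction, balance dirty pages, resched */
		printf("pass -> %d\n", ret);
		if (ret != -EAGAIN)	/* covers success and real errors */
			break;
	}
	return 0;
}
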
+
+/*
* Defrag specific helper to get an extent map.
*
* Differences between this and btrfs_get_extent() are:
@@ -891,8 +1035,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* very likely resulting in a larger extent after writeback is
* triggered (except in a case of free space fragmentation).
*/
- if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
- EXTENT_DELALLOC, 0, NULL))
+ if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
+ EXTENT_DELALLOC))
goto next;
/*
diff --git a/fs/btrfs/defrag.h b/fs/btrfs/defrag.h
index 5305f2283b5e..5a62763528d1 100644
--- a/fs/btrfs/defrag.h
+++ b/fs/btrfs/defrag.h
@@ -12,7 +12,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
-int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, struct btrfs_root *root);
+int btrfs_defrag_root(struct btrfs_root *root);
static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 427abaf608b8..51453d4928fa 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -322,9 +322,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
} else {
if (current->journal_info)
flush = BTRFS_RESERVE_FLUSH_LIMIT;
-
- if (btrfs_transaction_in_commit(fs_info))
- schedule_timeout(1);
}
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
@@ -346,7 +343,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
noflush);
if (ret)
return ret;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, meta_reserve, flush);
+ ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
+ meta_reserve, flush);
if (ret) {
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
return ret;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 90aaedce1548..7381241334e8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -328,7 +328,8 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
}
/*
- * __btrfs_lookup_delayed_item - look up the delayed item by key
+ * Look up the delayed item by key.
+ *
* @delayed_node: pointer to the delayed node
* @index: the dir index value to lookup (offset of a dir index key)
*
@@ -517,7 +518,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
/*
* For insertions we track reserved metadata space by accounting
* for the number of leaves that will be used, based on the delayed
- * node's index_items_size field.
+ * node's curr_index_batch_size and index_item_leaves fields.
*/
if (item->type == BTRFS_DELAYED_DELETION_ITEM)
item->bytes_reserved = num_bytes;
@@ -1030,7 +1031,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
sizeof(struct btrfs_inode_item));
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
goto out;
@@ -1378,8 +1379,7 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return -ENOMEM;
async_work->delayed_root = delayed_root;
- btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
- NULL);
+ btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
async_work->nr = nr;
btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@@ -1760,8 +1760,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
}
/*
- * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
- *
+ * Read dir info stored in the delayed tree.
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
struct list_head *ins_list)
@@ -1834,24 +1833,22 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_inode_block_group(inode_item, 0);
btrfs_set_stack_timespec_sec(&inode_item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(inode));
btrfs_set_stack_timespec_nsec(&inode_item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(inode));
btrfs_set_stack_timespec_sec(&inode_item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(inode));
btrfs_set_stack_timespec_nsec(&inode_item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(inode));
btrfs_set_stack_timespec_sec(&inode_item->ctime,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
btrfs_set_stack_timespec_nsec(&inode_item->ctime,
- inode_get_ctime(inode).tv_nsec);
+ inode_get_ctime_nsec(inode));
- btrfs_set_stack_timespec_sec(&inode_item->otime,
- BTRFS_I(inode)->i_otime.tv_sec);
- btrfs_set_stack_timespec_nsec(&inode_item->otime,
- BTRFS_I(inode)->i_otime.tv_nsec);
+ btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
+ btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
@@ -1891,19 +1888,17 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
- inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
- inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
+ inode_set_atime(inode, btrfs_stack_timespec_sec(&inode_item->atime),
+ btrfs_stack_timespec_nsec(&inode_item->atime));
- inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
- inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
+ inode_set_mtime(inode, btrfs_stack_timespec_sec(&inode_item->mtime),
+ btrfs_stack_timespec_nsec(&inode_item->mtime));
inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
btrfs_stack_timespec_nsec(&inode_item->ctime));
- BTRFS_I(inode)->i_otime.tv_sec =
- btrfs_stack_timespec_sec(&inode_item->otime);
- BTRFS_I(inode)->i_otime.tv_nsec =
- btrfs_stack_timespec_nsec(&inode_item->otime);
+ BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
+ BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
inode->i_generation = BTRFS_I(inode)->generation;
BTRFS_I(inode)->index_cnt = (u64)-1;
@@ -1914,9 +1909,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_inode *inode)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_delayed_node *delayed_node;
int ret = 0;
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 1da213197f55..5cceb31bbd16 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -135,7 +135,6 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct btrfs_inode *inode);
int btrfs_fill_inode(struct inode *inode, u32 *rdev);
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9fe4ccca50a0..9223934d95f4 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -57,16 +57,20 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
* Release a ref head's reservation.
*
* @fs_info: the filesystem
- * @nr: number of items to drop
+ * @nr_refs: number of delayed refs to drop
+ * @nr_csums: number of csum items to drop
*
* Drops the delayed ref head's count from the delayed refs rsv and free any
* excess reservation we had.
*/
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
- const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
- u64 released = 0;
+ u64 num_bytes;
+ u64 released;
+
+ num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
+ num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
if (released)
@@ -77,26 +81,118 @@ void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
/*
* Adjust the size of the delayed refs rsv.
*
- * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
- * it'll calculate the additional size and add it to the delayed_refs_rsv.
+ * This is to be called anytime we may have adjusted trans->delayed_ref_updates
+ * or trans->delayed_ref_csum_deletions, it'll calculate the additional size and
+ * add it to the delayed_refs_rsv.
*/
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
u64 num_bytes;
+ u64 reserved_bytes;
- if (!trans->delayed_ref_updates)
+ num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
+ num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
+ trans->delayed_ref_csum_deletions);
+
+ if (num_bytes == 0)
return;
- num_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
- trans->delayed_ref_updates);
+ /*
+ * Try to take num_bytes from the transaction's local delayed reserve.
+ * If not possible, try to take as much as it's available. If the local
+ * reserve doesn't have enough reserved space, the delayed refs reserve
+ * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
+ * by someone or if a transaction commit is triggered before that, the
+ * global block reserve will be used. We want to minimize using the
+ * global block reserve for cases we can account for in advance, to
+ * avoid exhausting it and reach -ENOSPC during a transaction commit.
+ */
+ spin_lock(&local_rsv->lock);
+ reserved_bytes = min(num_bytes, local_rsv->reserved);
+ local_rsv->reserved -= reserved_bytes;
+ local_rsv->full = (local_rsv->reserved >= local_rsv->size);
+ spin_unlock(&local_rsv->lock);
spin_lock(&delayed_rsv->lock);
delayed_rsv->size += num_bytes;
- delayed_rsv->full = false;
+ delayed_rsv->reserved += reserved_bytes;
+ delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
spin_unlock(&delayed_rsv->lock);
trans->delayed_ref_updates = 0;
+ trans->delayed_ref_csum_deletions = 0;
+}
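
In effect the function moves bytes that are already reserved from the transaction-local reserve to the delayed refs reserve, while growing the latter's size by the full requirement so any shortfall is covered later by a refill or, as a last resort, the global reserve. A lock-free userspace model of the accounting alone (struct and field names are illustrative, not the kernel's btrfs_block_rsv):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct rsv {
	uint64_t size;		/* bytes this reserve wants */
	uint64_t reserved;	/* bytes it actually holds */
	bool full;
};

/* Move up to num_bytes of held space from local to delayed; delayed's
 * size always grows by the full amount, so it may end up not full. */
static void migrate(struct rsv *local, struct rsv *delayed, uint64_t num_bytes)
{
	uint64_t moved = MIN(num_bytes, local->reserved);

	local->reserved -= moved;
	local->full = (local->reserved >= local->size);
	delayed->size += num_bytes;
	delayed->reserved += moved;
	delayed->full = (delayed->reserved >= delayed->size);
}

int main(void)
{
	struct rsv local = { .size = 0, .reserved = 4096, .full = true };
	struct rsv delayed = { 0 };

	migrate(&local, &delayed, 8192);
	/* Only 4096 bytes could move: delayed is 4096 short of its size. */
	printf("delayed size=%llu reserved=%llu full=%d\n",
	       (unsigned long long)delayed.size,
	       (unsigned long long)delayed.reserved, (int)delayed.full);
	return 0;
}
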
+
+/*
+ * Adjust the size of the delayed refs block reserve for 1 block group item
+ * insertion, used after allocating a block group.
+ */
+void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+
+ spin_lock(&delayed_rsv->lock);
+ /*
+ * Inserting a block group item does not require changing the free space
+ * tree, only the extent tree or the block group tree, so this is all we
+ * need.
+ */
+ delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
+ delayed_rsv->full = false;
+ spin_unlock(&delayed_rsv->lock);
+}
+
+/*
+ * Adjust the size of the delayed refs block reserve to release space for 1
+ * block group item insertion.
+ */
+void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
+ u64 released;
+
+ released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
+ if (released > 0)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+ 0, released, 0);
+}
+
+/*
+ * Adjust the size of the delayed refs block reserve for 1 block group item
+ * update.
+ */
+void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+
+ spin_lock(&delayed_rsv->lock);
+ /*
+ * Updating a block group item does not result in new nodes/leaves and
+ * does not require changing the free space tree, only the extent tree
+ * or the block group tree, so this is all we need.
+ */
+ delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
+ delayed_rsv->full = false;
+ spin_unlock(&delayed_rsv->lock);
+}
+
+/*
+ * Adjust the size of the delayed refs block reserve to release space for 1
+ * block group item update.
+ */
+void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
+ u64 released;
+
+ released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
+ if (released > 0)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+ 0, released, 0);
}
/*
@@ -154,6 +250,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_space_info *space_info = block_rsv->space_info;
u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0;
u64 refilled_bytes;
@@ -170,7 +267,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
if (!num_bytes)
return 0;
- ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
if (ret)
return ret;
@@ -199,8 +296,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
spin_unlock(&block_rsv->lock);
if (to_free > 0)
- btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
- to_free);
+ btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
if (refilled_bytes > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
@@ -422,7 +518,8 @@ int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
return 0;
}
-static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
+static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref)
{
@@ -433,9 +530,11 @@ static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
list_del(&ref->add_list);
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
+ btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
-static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
+static bool merge_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref,
u64 seq)
@@ -464,10 +563,10 @@ static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
mod = -next->ref_mod;
}
- drop_delayed_ref(delayed_refs, head, next);
+ drop_delayed_ref(fs_info, delayed_refs, head, next);
ref->ref_mod += mod;
if (ref->ref_mod == 0) {
- drop_delayed_ref(delayed_refs, head, ref);
+ drop_delayed_ref(fs_info, delayed_refs, head, ref);
done = true;
} else {
/*
@@ -505,7 +604,7 @@ again:
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
continue;
- if (merge_ref(delayed_refs, head, ref, seq))
+ if (merge_ref(fs_info, delayed_refs, head, ref, seq))
goto again;
}
}
@@ -584,10 +683,11 @@ void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
* Return true if the ref was merged into an existing one (and therefore can be
* freed by the caller).
*/
-static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
+static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *ref)
{
+ struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
struct btrfs_delayed_ref_node *exist;
int mod;
@@ -598,6 +698,7 @@ static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
spin_unlock(&href->lock);
+ trans->delayed_ref_updates++;
return false;
}
@@ -626,7 +727,7 @@ static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
/* remove existing tail if its ref_mod is zero */
if (exist->ref_mod == 0)
- drop_delayed_ref(root, href, exist);
+ drop_delayed_ref(trans->fs_info, root, href, exist);
spin_unlock(&href->lock);
return true;
}
@@ -647,6 +748,15 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
BUG_ON(existing->is_data != update->is_data);
spin_lock(&existing->lock);
+
+ /*
+ * When freeing an extent, we may not know the owning root when we
+ * first create the head_ref. However, some deref before the last deref
+ * will know it, so we just need to update the head_ref accordingly.
+ */
+ if (!existing->owning_root)
+ existing->owning_root = update->owning_root;
+
if (update->must_insert_reserved) {
/* if the extent was freed and then
* reallocated before the delayed ref
@@ -656,6 +766,7 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
* Set it again here
*/
existing->must_insert_reserved = update->must_insert_reserved;
+ existing->owning_root = update->owning_root;
/*
* update the num_bytes so we make sure the accounting
@@ -695,6 +806,8 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
/*
* If we are going from a positive ref mod to a negative or vice
* versa we need to make sure to adjust pending_csums accordingly.
+ * We reserve bytes for csum deletion when adding or updating a ref head;
+ * see add_delayed_ref_head() for more details.
*/
if (existing->is_data) {
u64 csum_leaves =
@@ -703,11 +816,11 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
delayed_refs->pending_csums -= existing->num_bytes;
- btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
+ btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
}
if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
delayed_refs->pending_csums += existing->num_bytes;
- trans->delayed_ref_updates += csum_leaves;
+ trans->delayed_ref_csum_deletions += csum_leaves;
}
}
@@ -718,7 +831,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root,
u64 reserved, int action, bool is_data,
- bool is_system)
+ bool is_system, u64 owning_root)
{
int count_mod = 1;
bool must_insert_reserved = false;
@@ -758,7 +871,9 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
head_ref->bytenr = bytenr;
head_ref->num_bytes = num_bytes;
head_ref->ref_mod = count_mod;
+ head_ref->reserved_bytes = reserved;
head_ref->must_insert_reserved = must_insert_reserved;
+ head_ref->owning_root = owning_root;
head_ref->is_data = is_data;
head_ref->is_system = is_system;
head_ref->ref_tree = RB_ROOT_CACHED;
@@ -819,16 +934,21 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
+ /*
+ * We reserve the amount of bytes needed to delete csums when
+ * adding the ref head and not when adding individual drop refs
+ * since the csum items are deleted only after running the last
+ * delayed drop ref (the data extent's ref count drops to 0).
+ */
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
- trans->delayed_ref_updates +=
+ trans->delayed_ref_csum_deletions +=
btrfs_csum_bytes_to_leaves(trans->fs_info,
head_ref->num_bytes);
}
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
- trans->delayed_ref_updates++;
}
if (qrecord_inserted_ret)
*qrecord_inserted_ret = qrecord_inserted;
@@ -837,8 +957,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
}
/*
- * init_delayed_ref_common - Initialize the structure which represents a
- * modification to a an extent.
+ * Initialize the structure which represents a modification to an extent.
*
* @fs_info: Internal structure of the mounted filesystem.
*
@@ -909,7 +1028,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 parent = generic_ref->parent;
u8 ref_type;
- is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);
+ is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
@@ -922,8 +1041,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
- !generic_ref->skip_qgroup) {
+ if (btrfs_qgroup_enabled(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
@@ -938,15 +1056,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
ref_type = BTRFS_TREE_BLOCK_REF_KEY;
init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
- generic_ref->tree_ref.owning_root, action,
+ generic_ref->tree_ref.ref_root, action,
ref_type);
- ref->root = generic_ref->tree_ref.owning_root;
+ ref->root = generic_ref->tree_ref.ref_root;
ref->parent = parent;
ref->level = level;
init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
- generic_ref->tree_ref.owning_root, 0, action,
- false, is_system);
+ generic_ref->tree_ref.ref_root, 0, action,
+ false, is_system, generic_ref->owning_root);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
@@ -959,7 +1077,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
head_ref = add_delayed_ref_head(trans, head_ref, record,
action, &qrecord_inserted);
- merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
+ merged = insert_delayed_ref(trans, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
/*
@@ -998,7 +1116,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr = generic_ref->bytenr;
u64 num_bytes = generic_ref->len;
u64 parent = generic_ref->parent;
- u64 ref_root = generic_ref->data_ref.owning_root;
+ u64 ref_root = generic_ref->data_ref.ref_root;
u64 owner = generic_ref->data_ref.ino;
u64 offset = generic_ref->data_ref.offset;
u8 ref_type;
@@ -1026,8 +1144,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
- !generic_ref->skip_qgroup) {
+ if (btrfs_qgroup_enabled(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
@@ -1038,7 +1155,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
}
init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
- reserved, action, true, false);
+ reserved, action, true, false, generic_ref->owning_root);
head_ref->extent_op = NULL;
delayed_refs = &trans->transaction->delayed_refs;
@@ -1051,7 +1168,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
head_ref = add_delayed_ref_head(trans, head_ref, record,
action, &qrecord_inserted);
- merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
+ merged = insert_delayed_ref(trans, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
/*
@@ -1084,7 +1201,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
return -ENOMEM;
init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
- BTRFS_UPDATE_DELAYED_HEAD, false, false);
+ BTRFS_UPDATE_DELAYED_HEAD, false, false, 0);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index fd9bf2b709c0..62d679d40f4f 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -9,10 +9,16 @@
#include <linux/refcount.h>
/* these are the possible values of struct btrfs_delayed_ref_node->action */
-#define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
-#define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
-#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
-#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
+enum btrfs_delayed_ref_action {
+ /* Add one backref to the tree */
+ BTRFS_ADD_DELAYED_REF = 1,
+ /* Delete one backref from the tree */
+ BTRFS_DROP_DELAYED_REF,
+ /* Record a full extent allocation */
+ BTRFS_ADD_DELAYED_EXTENT,
+ /* Not changing ref count on head ref */
+ BTRFS_UPDATE_DELAYED_HEAD,
+} __packed;
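
Turning the defines into a __packed enum keeps type safety for the action field while letting the compiler store it in one byte, which pays off once the enum is embedded in struct btrfs_ref further down. A standalone demonstration of the size effect (GCC/clang attribute syntax assumed):

#include <stdio.h>

enum action_packed {
	ADD_DELAYED_REF = 1,
	DROP_DELAYED_REF,
	ADD_DELAYED_EXTENT,
	UPDATE_DELAYED_HEAD,
} __attribute__((packed));	/* what the kernel's __packed expands to */

enum action_plain {
	PLAIN_ADD = 1,
	PLAIN_DROP,
};

int main(void)
{
	/* packed fits in 1 byte; a plain enum is int-sized (usually 4). */
	printf("packed=%zu plain=%zu\n",
	       sizeof(enum action_packed), sizeof(enum action_plain));
	return 0;
}
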
struct btrfs_delayed_ref_node {
struct rb_node ref_node;
@@ -105,6 +111,18 @@ struct btrfs_delayed_ref_head {
int ref_mod;
/*
+ * The root that triggered the allocation when must_insert_reserved is
+ * set to true.
+ */
+ u64 owning_root;
+
+ /*
+ * Track reserved bytes when setting must_insert_reserved. On success
+ * or cleanup, we will need to free the reservation.
+ */
+ u64 reserved_bytes;
+
+ /*
* When a new extent is allocated, it is just reserved in memory.
* The actual extent isn't inserted into the extent allocation tree
* until the delayed ref is processed. must_insert_reserved is
@@ -117,6 +135,7 @@ struct btrfs_delayed_ref_head {
* the free has happened.
*/
bool must_insert_reserved;
+
bool is_data;
bool is_system;
bool processing;
@@ -183,13 +202,13 @@ enum btrfs_ref_type {
BTRFS_REF_DATA,
BTRFS_REF_METADATA,
BTRFS_REF_LAST,
-};
+} __packed;
struct btrfs_data_ref {
/* For EXTENT_DATA_REF */
- /* Original root this data extent belongs to */
- u64 owning_root;
+ /* Root which owns this data reference. */
+ u64 ref_root;
/* Inode which refers to this data extent */
u64 ino;
@@ -212,18 +231,18 @@ struct btrfs_tree_ref {
int level;
/*
- * Root which owns this tree block.
+ * Root which owns this tree block reference.
*
* For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
*/
- u64 owning_root;
+ u64 ref_root;
/* For non-skinny metadata, no special member needed */
};
struct btrfs_ref {
enum btrfs_ref_type type;
- int action;
+ enum btrfs_delayed_ref_action action;
/*
* Whether this extent should go through qgroup record.
@@ -239,6 +258,7 @@ struct btrfs_ref {
#endif
u64 bytenr;
u64 len;
+ u64 owning_root;
/* Bytenr of the parent tree block */
u64 parent;
@@ -277,24 +297,37 @@ static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_in
return num_bytes;
}
+static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,
+ int num_csum_items)
+{
+ /*
+ * Deleting csum items does not result in new nodes/leaves and does not
+ * require changing the free space tree, only the csum tree, so this is
+ * all we need.
+ */
+ return btrfs_calc_metadata_size(fs_info, num_csum_items);
+}
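
The contrast with btrfs_calc_insert_metadata_size() is the point: a deletion never grows the tree, so one tree path per item is enough, while an insertion reserves roughly twice as much to cover node splits. A rough model under that assumption (the authoritative formulas live in the kernel's fs.h; the constants here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define BTRFS_MAX_LEVEL 8

/* Assumed shape of btrfs_calc_metadata_size(): one full tree path
 * (BTRFS_MAX_LEVEL nodes) per item, no split headroom. */
static uint64_t calc_metadata_size(uint32_t nodesize, unsigned int items)
{
	return (uint64_t)nodesize * BTRFS_MAX_LEVEL * items;
}

/* Assumed shape of btrfs_calc_insert_metadata_size(): double, since an
 * insertion may split a node at every level. */
static uint64_t calc_insert_metadata_size(uint32_t nodesize, unsigned int items)
{
	return calc_metadata_size(nodesize, items) * 2;
}

int main(void)
{
	/* 16K nodes: deleting 3 csum items reserves 384K ... */
	printf("%llu\n", (unsigned long long)calc_metadata_size(16384, 3));
	/* ... while inserting 3 items would reserve 768K. */
	printf("%llu\n", (unsigned long long)calc_insert_metadata_size(16384, 3));
	return 0;
}
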
+
static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
- int action, u64 bytenr, u64 len, u64 parent)
+ int action, u64 bytenr, u64 len,
+ u64 parent, u64 owning_root)
{
generic_ref->action = action;
generic_ref->bytenr = bytenr;
generic_ref->len = len;
generic_ref->parent = parent;
+ generic_ref->owning_root = owning_root;
}
-static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
- int level, u64 root, u64 mod_root, bool skip_qgroup)
+static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level,
+ u64 root, u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
/* If @real_root not set, use @root as fallback */
generic_ref->real_root = mod_root ?: root;
#endif
generic_ref->tree_ref.level = level;
- generic_ref->tree_ref.owning_root = root;
+ generic_ref->tree_ref.ref_root = root;
generic_ref->type = BTRFS_REF_METADATA;
if (skip_qgroup || !(is_fstree(root) &&
(!mod_root || is_fstree(mod_root))))
@@ -312,7 +345,7 @@ static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
/* If @real_root not set, use @root as fallback */
generic_ref->real_root = mod_root ?: ref_root;
#endif
- generic_ref->data_ref.owning_root = ref_root;
+ generic_ref->data_ref.ref_root = ref_root;
generic_ref->data_ref.ino = ino;
generic_ref->data_ref.offset = offset;
generic_ref->type = BTRFS_REF_DATA;
@@ -338,7 +371,6 @@ btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
- WARN_ON(refcount_read(&ref->refs) == 0);
if (refcount_dec_and_test(&ref->refs)) {
WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
switch (ref->type) {
@@ -402,8 +434,12 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
-void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
+void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
+void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
+void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index fff22ed55c42..f9544fda38e9 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -17,7 +17,6 @@
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
-#include "check-integrity.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
@@ -247,6 +246,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
int ret = 0;
@@ -257,12 +257,13 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return -EINVAL;
}
- bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE,
- fs_info->bdev_holder, NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
+ fs_info->bdev_holder, NULL);
+ if (IS_ERR(bdev_handle)) {
btrfs_err(fs_info, "target device %s is invalid!", device_path);
- return PTR_ERR(bdev);
+ return PTR_ERR(bdev_handle);
}
+ bdev = bdev_handle->bdev;
if (!btrfs_check_device_zone_type(fs_info, bdev)) {
btrfs_err(fs_info,
@@ -313,9 +314,9 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
device->commit_bytes_used = device->bytes_used;
device->fs_info = fs_info;
device->bdev = bdev;
+ device->bdev_handle = bdev_handle;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
- device->holder = fs_info->bdev_holder;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
device->fs_devices = fs_devices;
@@ -334,7 +335,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
return 0;
error:
- blkdev_put(bdev, fs_info->bdev_holder);
+ bdev_release(bdev_handle);
return ret;
}
@@ -442,7 +443,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
dev_replace->item_needs_writeback = 0;
up_write(&dev_replace->rwsem);
- btrfs_mark_buffer_dirty(eb);
+ btrfs_mark_buffer_dirty(trans, eb);
out:
btrfs_free_path(path);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index 082eb0e19598..9c07d5c3e5ad 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
- btrfs_extend_item(path, data_size);
+ btrfs_extend_item(trans, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
@@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, name, name_ptr, name_len);
write_extent_buffer(leaf, data, data_ptr, data_len);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
return ret;
}
@@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name->name, name_ptr, name->len);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
second_insert:
/* FIXME, use some real flag for selecting the extra index */
@@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
- btrfs_truncate_item(path, item_len - sub_item_len, 1);
+ btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
}
return ret;
}
diff --git a/fs/btrfs/dir-item.h b/fs/btrfs/dir-item.h
index aab4b7cc7fa0..e40a226373d7 100644
--- a/fs/btrfs/dir-item.h
+++ b/fs/btrfs/dir-item.h
@@ -3,6 +3,10 @@
#ifndef BTRFS_DIR_ITEM_H
#define BTRFS_DIR_ITEM_H
+#include <linux/crc32c.h>
+
+struct fscrypt_str;
+
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
const struct fscrypt_str *name);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
@@ -39,4 +43,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
const char *name,
int name_len);
+static inline u64 btrfs_name_hash(const char *name, int len)
+{
+ return crc32c((u32)~1, name, len);
+}
+
#endif
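
With btrfs_name_hash() now visible in the header, the dir item hash is simply a CRC32C of the name seeded with (u32)~1. A self-contained sketch that swaps the kernel's crc32c() for a slow, table-free bitwise CRC32C (0x82F63B78 is the reflected Castagnoli polynomial):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Slow bitwise CRC32C, modelling the kernel's crc32c() library call. */
static uint32_t crc32c(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

/* Same definition as the header above: note the unusual ~1 seed. */
static uint64_t btrfs_name_hash(const char *name, int len)
{
	return crc32c((uint32_t)~1, name, len);
}

int main(void)
{
	const char *name = "example.txt";

	printf("dir item hash: %llu\n",
	       (unsigned long long)btrfs_name_hash(name, (int)strlen(name)));
	return 0;
}
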
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 68f60d50e1fd..401ea09ae4b8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -29,7 +29,6 @@
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
-#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
@@ -245,6 +244,7 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
struct extent_buffer *eb = bbio->private;
struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start = btrfs_header_bytenr(eb);
+ u64 last_trans;
u8 result[BTRFS_CSUM_SIZE];
int ret;
@@ -282,12 +282,12 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
* Also check the generation, the eb reached here must be newer than
* last committed. Or something seriously wrong happened.
*/
- if (unlikely(btrfs_header_generation(eb) <= fs_info->last_trans_committed)) {
+ last_trans = btrfs_get_last_trans_committed(fs_info);
+ if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
ret = -EUCLEAN;
btrfs_err(fs_info,
"block=%llu bad generation, have %llu expect > %llu",
- eb->start, btrfs_header_generation(eb),
- fs_info->last_trans_committed);
+ eb->start, btrfs_header_generation(eb), last_trans);
goto error;
}
write_extent_buffer(eb, result, 0, fs_info->csum_size);
@@ -318,9 +318,10 @@ static bool check_tree_block_fsid(struct extent_buffer *eb)
BTRFS_FSID_SIZE);
/*
- * alloc_fs_devices() copies the fsid into metadata_uuid if the
- * metadata_uuid is unset in the superblock, including for a seed device.
- * So, we can use fs_devices->metadata_uuid.
+ * alloc_fs_devices() copies the fsid into fs_devices::metadata_uuid.
+ * This is then overwritten by the metadata_uuid, if present, in
+ * device_list_add(). The same is true for a seed device. So use of
+ * fs_devices::metadata_uuid is appropriate here.
*/
if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
return false;
@@ -675,9 +676,9 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
refcount_set(&root->refs, 1);
atomic_set(&root->snapshot_force_cow, 0);
atomic_set(&root->nr_swapfiles, 0);
- root->log_transid = 0;
+ btrfs_set_root_log_transid(root, 0);
root->log_transid_committed = -1;
- root->last_log_commit = 0;
+ btrfs_set_root_last_log_commit(root, 0);
root->anon_dev = 0;
if (!dummy) {
extent_io_tree_init(fs_info, &root->dirty_log_pages,
@@ -859,7 +860,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
root->root_key.offset = 0;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
- BTRFS_NESTING_NORMAL);
+ 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
leaf = NULL;
@@ -867,7 +868,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
}
root->node = leaf;
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
root->commit_root = btrfs_root_node(root);
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
@@ -936,13 +937,13 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
*/
leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
- NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
+ NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf))
return PTR_ERR(leaf);
root->node = leaf;
- btrfs_mark_buffer_dirty(root->node);
+ btrfs_mark_buffer_dirty(trans, root->node);
btrfs_tree_unlock(root->node);
return 0;
@@ -1004,9 +1005,9 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(root->log_root);
root->log_root = log_root;
- root->log_transid = 0;
+ btrfs_set_root_log_transid(root, 0);
root->log_transid_committed = -1;
- root->last_log_commit = 0;
+ btrfs_set_root_last_log_commit(root, 0);
return 0;
}
@@ -1179,6 +1180,8 @@ static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
return btrfs_grab_root(fs_info->block_group_root);
case BTRFS_FREE_SPACE_TREE_OBJECTID:
return btrfs_grab_root(btrfs_global_root(fs_info, &key));
+ case BTRFS_RAID_STRIPE_TREE_OBJECTID:
+ return btrfs_grab_root(fs_info->stripe_root);
default:
return NULL;
}
@@ -1259,6 +1262,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
btrfs_put_root(fs_info->fs_root);
btrfs_put_root(fs_info->data_reloc_root);
btrfs_put_root(fs_info->block_group_root);
+ btrfs_put_root(fs_info->stripe_root);
btrfs_check_leaked_roots(fs_info);
btrfs_extent_buffer_leak_debug_check(fs_info);
kfree(fs_info->super_copy);
@@ -1402,7 +1406,8 @@ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
}
/*
- * btrfs_get_fs_root_commit_root - return a root for the given objectid
+ * Return a root for the given objectid.
+ *
* @fs_info: the fs_info
* @objectid: the objectid we need to lookup
*
@@ -1699,11 +1704,11 @@ static void backup_super_roots(struct btrfs_fs_info *info)
}
/*
- * read_backup_root - Reads a backup root based on the passed priority. Prio 0
- * is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
+ * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
+ * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots.
*
- * fs_info - filesystem whose backup roots need to be read
- * priority - priority of backup root required
+ * @fs_info: filesystem whose backup roots need to be read
+ * @priority: priority of backup root required
*
* Returns backup root index on success and -EINVAL otherwise.
*/
@@ -1803,6 +1808,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
free_root_extent_buffers(info->fs_root);
free_root_extent_buffers(info->data_reloc_root);
free_root_extent_buffers(info->block_group_root);
+ free_root_extent_buffers(info->stripe_root);
if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
}
@@ -2262,7 +2268,6 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
root = btrfs_read_tree_root(tree_root, &location);
if (!IS_ERR(root)) {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
- set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
fs_info->quota_root = root;
}
@@ -2279,6 +2284,20 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
fs_info->uuid_root = root;
}
+ if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
+ location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
+ root = btrfs_read_tree_root(tree_root, &location);
+ if (IS_ERR(root)) {
+ if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ } else {
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->stripe_root = root;
+ }
+ }
+
return 0;
out:
btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
@@ -2381,7 +2400,8 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
- if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
+ if (!fs_info->fs_devices->temp_fsid &&
+ memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
sb->fsid, fs_info->fs_devices->fsid);
@@ -2634,7 +2654,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
/* All successful */
fs_info->generation = btrfs_header_generation(tree_root->node);
- fs_info->last_trans_committed = fs_info->generation;
+ btrfs_set_last_trans_committed(fs_info, fs_info->generation);
fs_info->last_reloc_trans = 0;
/* Always begin writing backup roots after the one being used */
@@ -2735,9 +2755,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->ordered_root_lock);
btrfs_init_scrub(fs_info);
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- fs_info->check_integrity_print_mask = 0;
-#endif
btrfs_init_balance(fs_info);
btrfs_init_async_reclaim_work(fs_info);
@@ -3157,7 +3174,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
u32 nodesize;
u32 stripesize;
u64 generation;
- u64 features;
u16 csum_type;
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -3239,15 +3255,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
disk_super = fs_info->super_copy;
-
- features = btrfs_super_flags(disk_super);
- if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
- features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
- btrfs_set_super_flags(disk_super, features);
- btrfs_info(fs_info,
- "found metadata UUID change in progress flag, clearing");
- }
-
memcpy(fs_info->super_for_commit, fs_info->super_copy,
sizeof(*fs_info->super_for_commit));
@@ -3509,18 +3516,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
"auto enabling async discard");
}
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
- ret = btrfsic_mount(fs_info, fs_devices,
- btrfs_test_opt(fs_info,
- CHECK_INTEGRITY_DATA) ? 1 : 0,
- fs_info->check_integrity_print_mask);
- if (ret)
- btrfs_warn(fs_info,
- "failed to initialize integrity check module: %d",
- ret);
- }
-#endif
ret = btrfs_read_qgroup_config(fs_info);
if (ret)
goto fail_trans_kthread;
@@ -3820,8 +3815,6 @@ static int write_dev_supers(struct btrfs_device *device,
*/
if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
bio->bi_opf |= REQ_FUA;
-
- btrfsic_check_bio(bio);
submit_bio(bio);
if (btrfs_advance_sb_log(device, i))
@@ -3917,28 +3910,11 @@ static void write_dev_flush(struct btrfs_device *device)
device->last_flush_error = BLK_STS_OK;
-#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- /*
- * When a disk has write caching disabled, we skip submission of a bio
- * with flush and sync requests before writing the superblock, since
- * it's not needed. However when the integrity checker is enabled, this
- * results in reports that there are metadata blocks referred by a
- * superblock that were not properly flushed. So don't skip the bio
- * submission only when the integrity checker is enabled for the sake
- * of simplicity, since this is a debug tool and not meant for use in
- * non-debug builds.
- */
- if (!bdev_write_cache(device->bdev))
- return;
-#endif
-
bio_init(bio, device->bdev, NULL, 0,
REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
bio->bi_end_io = btrfs_end_empty_barrier;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
-
- btrfsic_check_bio(bio);
submit_bio(bio);
set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
@@ -4414,16 +4390,12 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
iput(fs_info->btree_inode);
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
- btrfsic_unmount(fs_info->fs_devices);
-#endif
-
btrfs_mapping_tree_free(&fs_info->mapping_tree);
btrfs_close_devices(fs_info->fs_devices);
}
-void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
+void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
u64 transid = btrfs_header_generation(buf);
@@ -4437,21 +4409,16 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
return;
#endif
+ /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
+ ASSERT(trans->transid == fs_info->generation);
btrfs_assert_tree_write_locked(buf);
- if (transid != fs_info->generation)
- WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
- buf->start, transid, fs_info->generation);
- set_extent_buffer_dirty(buf);
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- /*
- * btrfs_check_leaf() won't check item data if we don't have WRITTEN
- * set, so this will only validate the basic structure of the items.
- */
- if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(buf)) {
- btrfs_print_leaf(buf);
- ASSERT(0);
+ if (unlikely(transid != fs_info->generation)) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+ btrfs_crit(fs_info,
+"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
+ buf->start, transid, fs_info->generation);
}
-#endif
+ set_extent_buffer_dirty(buf);
}
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
@@ -4611,6 +4578,7 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
btrfs_put_delayed_ref(ref);
+ btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
if (head->must_insert_reserved)
pin_bytes = true;
@@ -4808,7 +4776,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_put_block_group(cache);
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 02b645744a82..50dab8f639dc 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -104,7 +104,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
}
void btrfs_put_root(struct btrfs_root *root);
-void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
+void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
+ struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_extent_buffer(struct extent_buffer *buf,
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index ff8e117a1ace..ea149be28dff 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -105,32 +105,40 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info,
lockdep_set_class(&tree->lock, &file_extent_tree_class);
}
+/*
+ * Empty an io tree, removing and freeing every extent state record from the
+ * tree. This should be called once we are sure no other task can access the
+ * tree anymore, so no tree updates happen after we empty the tree and there
+ * aren't any waiters on any extent state record (EXTENT_LOCKED bit is never
+ * set on any extent state when calling this function).
+ */
void extent_io_tree_release(struct extent_io_tree *tree)
{
+ struct rb_root root;
+ struct extent_state *state;
+ struct extent_state *tmp;
+
spin_lock(&tree->lock);
- /*
- * Do a single barrier for the waitqueue_active check here, the state
- * of the waitqueue should not change once extent_io_tree_release is
- * called.
- */
- smp_mb();
- while (!RB_EMPTY_ROOT(&tree->state)) {
- struct rb_node *node;
- struct extent_state *state;
-
- node = rb_first(&tree->state);
- state = rb_entry(node, struct extent_state, rb_node);
- rb_erase(&state->rb_node, &tree->state);
+ root = tree->state;
+ tree->state = RB_ROOT;
+ rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) {
+ /* Clear node to keep free_extent_state() happy. */
RB_CLEAR_NODE(&state->rb_node);
+ ASSERT(!(state->state & EXTENT_LOCKED));
/*
- * btree io trees aren't supposed to have tasks waiting for
- * changes in the flags of extent states ever.
+ * No need for a memory barrier here, as we are holding the tree
+ * lock and we only change the waitqueue while holding that lock
+ * (see wait_extent_bit()).
*/
ASSERT(!waitqueue_active(&state->wq));
free_extent_state(state);
-
cond_resched_lock(&tree->lock);
}
+ /*
+ * Should still be empty even after a reschedule, no other task should
+ * be accessing the tree anymore.
+ */
+ ASSERT(RB_EMPTY_ROOT(&tree->state));
spin_unlock(&tree->lock);
}
@@ -327,6 +335,36 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
"locking error: extent tree was modified by another thread while locked");
}
+static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state)
+{
+ struct extent_state *prev;
+
+ prev = prev_state(state);
+ if (prev && prev->end == state->start - 1 && prev->state == state->state) {
+ if (tree->inode)
+ btrfs_merge_delalloc_extent(tree->inode, state, prev);
+ state->start = prev->start;
+ rb_erase(&prev->rb_node, &tree->state);
+ RB_CLEAR_NODE(&prev->rb_node);
+ free_extent_state(prev);
+ }
+}
+
+static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state)
+{
+ struct extent_state *next;
+
+ next = next_state(state);
+ if (next && next->start == state->end + 1 && next->state == state->state) {
+ if (tree->inode)
+ btrfs_merge_delalloc_extent(tree->inode, state, next);
+ state->end = next->end;
+ rb_erase(&next->rb_node, &tree->state);
+ RB_CLEAR_NODE(&next->rb_node);
+ free_extent_state(next);
+ }
+}
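
Both helpers apply the same rule: a neighbour is absorbed only if it is exactly adjacent and carries identical state bits; a one-byte gap or a differing flag keeps the records separate. A toy version of that predicate over plain structs (illustrative types, not the kernel's extent_state):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct state {
	uint64_t start;
	uint64_t end;		/* inclusive */
	unsigned int bits;
};

/* Merge rule shared by merge_prev_state()/merge_next_state(). */
static bool can_merge(const struct state *a, const struct state *b)
{
	return a->end + 1 == b->start && a->bits == b->bits;
}

int main(void)
{
	struct state a = { 0, 4095, 0x1 };
	struct state b = { 4096, 8191, 0x1 };
	struct state c = { 8193, 12287, 0x1 };

	printf("a|b: %d\n", can_merge(&a, &b)); /* 1: adjacent, same bits */
	printf("b|c: %d\n", can_merge(&b, &c)); /* 0: one-byte gap at 8192 */
	return 0;
}
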
+
/*
* Utility function to look for merge candidates inside a given range. Any
* extents with matching state are merged together into a single extent in the
@@ -338,31 +376,11 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
*/
static void merge_state(struct extent_io_tree *tree, struct extent_state *state)
{
- struct extent_state *other;
-
if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
return;
- other = prev_state(state);
- if (other && other->end == state->start - 1 &&
- other->state == state->state) {
- if (tree->inode)
- btrfs_merge_delalloc_extent(tree->inode, state, other);
- state->start = other->start;
- rb_erase(&other->rb_node, &tree->state);
- RB_CLEAR_NODE(&other->rb_node);
- free_extent_state(other);
- }
- other = next_state(state);
- if (other && other->start == state->end + 1 &&
- other->state == state->state) {
- if (tree->inode)
- btrfs_merge_delalloc_extent(tree->inode, state, other);
- state->end = other->end;
- rb_erase(&other->rb_node, &tree->state);
- RB_CLEAR_NODE(&other->rb_node);
- free_extent_state(other);
- }
+ merge_prev_state(tree, state);
+ merge_next_state(tree, state);
}
static void set_state_bits(struct extent_io_tree *tree,
@@ -384,19 +402,27 @@ static void set_state_bits(struct extent_io_tree *tree,
* Insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted.
*
- * This may return -EEXIST if the extent is already there, in which case the
- * state struct is freed.
+ * Returns a pointer to the struct extent_state record containing the range
+ * requested for insertion, which may be the same as the given struct or it
+ * may be an existing record in the tree that was expanded to accommodate the
+ * requested range. If the returned extent_state differs from the one that was
+ * given, the latter can be freed or reused by the caller.
+ *
+ * On error it returns an error pointer.
*
* The tree lock is not taken internally. This is a utility function and
* probably isn't what you want to call (see set/clear_extent_bit).
*/
-static int insert_state(struct extent_io_tree *tree,
- struct extent_state *state,
- u32 bits, struct extent_changeset *changeset)
+static struct extent_state *insert_state(struct extent_io_tree *tree,
+ struct extent_state *state,
+ u32 bits,
+ struct extent_changeset *changeset)
{
struct rb_node **node;
struct rb_node *parent = NULL;
- const u64 end = state->end;
+ const u64 start = state->start - 1;
+ const u64 end = state->end + 1;
+ const bool try_merge = !(bits & (EXTENT_LOCKED | EXTENT_BOUNDARY));
set_state_bits(tree, state, bits, changeset);
@@ -407,23 +433,42 @@ static int insert_state(struct extent_io_tree *tree,
parent = *node;
entry = rb_entry(parent, struct extent_state, rb_node);
- if (end < entry->start) {
+ if (state->end < entry->start) {
+ if (try_merge && end == entry->start &&
+ state->state == entry->state) {
+ if (tree->inode)
+ btrfs_merge_delalloc_extent(tree->inode,
+ state, entry);
+ entry->start = state->start;
+ merge_prev_state(tree, entry);
+ state->state = 0;
+ return entry;
+ }
node = &(*node)->rb_left;
- } else if (end > entry->end) {
+ } else if (state->end > entry->end) {
+ if (try_merge && entry->end == start &&
+ state->state == entry->state) {
+ if (tree->inode)
+ btrfs_merge_delalloc_extent(tree->inode,
+ state, entry);
+ entry->end = state->end;
+ merge_next_state(tree, entry);
+ state->state = 0;
+ return entry;
+ }
node = &(*node)->rb_right;
} else {
btrfs_err(tree->fs_info,
"found node %llu %llu on insert of %llu %llu",
- entry->start, entry->end, state->start, end);
- return -EEXIST;
+ entry->start, entry->end, state->start, state->end);
+ return ERR_PTR(-EEXIST);
}
}
rb_link_node(&state->rb_node, parent, node);
rb_insert_color(&state->rb_node, &tree->state);
- merge_state(tree, state);
- return 0;
+ return state;
}
/*
@@ -708,26 +753,13 @@ out:
}
-static void wait_on_state(struct extent_io_tree *tree,
- struct extent_state *state)
- __releases(tree->lock)
- __acquires(tree->lock)
-{
- DEFINE_WAIT(wait);
- prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock(&tree->lock);
- schedule();
- spin_lock(&tree->lock);
- finish_wait(&state->wq, &wait);
-}
-
/*
* Wait for one or more bits to clear on a range in the state tree.
* The range [start, end] is inclusive.
* The tree lock is taken by this function
*/
-void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- struct extent_state **cached_state)
+static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+ u32 bits, struct extent_state **cached_state)
{
struct extent_state *state;
@@ -758,9 +790,15 @@ process_node:
goto out;
if (state->state & bits) {
+ DEFINE_WAIT(wait);
+
start = state->start;
refcount_inc(&state->refs);
- wait_on_state(tree, state);
+ prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock(&tree->lock);
+ schedule();
+ spin_lock(&tree->lock);
+ finish_wait(&state->wq, &wait);
free_extent_state(state);
goto again;
}
@@ -847,10 +885,19 @@ bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
if (state->end == start - 1 && extent_state_in_tree(state)) {
while ((state = next_state(state)) != NULL) {
if (state->state & bits)
- goto got_it;
+ break;
}
+ /*
+ * If we found the next extent state, clear cached_state
+ * so that we can cache the next extent state below and
+ * avoid future calls going over the same extent state
+ * again. If we haven't found any, clear as well since
+ * it's now useless.
+ */
free_extent_state(*cached_state);
*cached_state = NULL;
+ if (state)
+ goto got_it;
goto out;
}
free_extent_state(*cached_state);
@@ -1133,6 +1180,8 @@ hit_next:
*/
if (state->start > start) {
u64 this_end;
+ struct extent_state *inserted_state;
+
if (end < last_start)
this_end = end;
else
@@ -1148,12 +1197,15 @@ hit_next:
*/
prealloc->start = start;
prealloc->end = this_end;
- err = insert_state(tree, prealloc, bits, changeset);
- if (err)
+ inserted_state = insert_state(tree, prealloc, bits, changeset);
+ if (IS_ERR(inserted_state)) {
+ err = PTR_ERR(inserted_state);
extent_io_tree_panic(tree, err);
+ }
- cache_state(prealloc, cached_state);
- prealloc = NULL;
+ cache_state(inserted_state, cached_state);
+ if (inserted_state == prealloc)
+ prealloc = NULL;
start = this_end + 1;
goto search_again;
}
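The inserted_state == prealloc check is what keeps the preallocation logic
honest: when insert_state() merges into an existing neighbor it returns that
neighbor and leaves the passed-in state unused, so prealloc must survive for
a later iteration and the cache must point at the merged state, not at the
unused preallocation. The invariant, with the names from the hunk above:

	inserted_state = insert_state(tree, prealloc, bits, changeset);
	/* may be a merged neighbor rather than prealloc itself */
	cache_state(inserted_state, cached_state);
	if (inserted_state == prealloc)
		prealloc = NULL;	/* consumed, allocate a fresh one next */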
@@ -1356,6 +1408,8 @@ hit_next:
*/
if (state->start > start) {
u64 this_end;
+ struct extent_state *inserted_state;
+
if (end < last_start)
this_end = end;
else
@@ -1373,11 +1427,14 @@ hit_next:
*/
prealloc->start = start;
prealloc->end = this_end;
- err = insert_state(tree, prealloc, bits, NULL);
- if (err)
+ inserted_state = insert_state(tree, prealloc, bits, NULL);
+ if (IS_ERR(inserted_state)) {
+ err = PTR_ERR(inserted_state);
extent_io_tree_panic(tree, err);
- cache_state(prealloc, cached_state);
- prealloc = NULL;
+ }
+ cache_state(inserted_state, cached_state);
+ if (inserted_state == prealloc)
+ prealloc = NULL;
start = this_end + 1;
goto search_again;
}
@@ -1640,15 +1697,46 @@ search:
}
/*
- * Search a range in the state tree for a given mask. If 'filled' == 1, this
- * returns 1 only if every extent in the tree has the bits set. Otherwise, 1
- * is returned if any bit in the range is found set.
+ * Check if the single @bit exists in the given range.
+ */
+bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
+{
+ struct extent_state *state = NULL;
+ bool bitset = false;
+
+ ASSERT(is_power_of_2(bit));
+
+ spin_lock(&tree->lock);
+ state = tree_search(tree, start);
+ while (state && start <= end) {
+ if (state->start > end)
+ break;
+
+ if (state->state & bit) {
+ bitset = true;
+ break;
+ }
+
+ /* If state->end is (u64)-1, start will overflow to 0 */
+ start = state->end + 1;
+ if (start > end || start == 0)
+ break;
+ state = next_state(state);
+ }
+ spin_unlock(&tree->lock);
+ return bitset;
+}
+
+/*
+ * Check if the whole range [@start, @end] contains the single @bit set.
*/
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int filled, struct extent_state *cached)
+bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+ struct extent_state *cached)
{
struct extent_state *state = NULL;
- int bitset = 0;
+ bool bitset = true;
+
+ ASSERT(is_power_of_2(bit));
spin_lock(&tree->lock);
if (cached && extent_state_in_tree(cached) && cached->start <= start &&
@@ -1657,35 +1745,35 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
else
state = tree_search(tree, start);
while (state && start <= end) {
- if (filled && state->start > start) {
- bitset = 0;
+ if (state->start > start) {
+ bitset = false;
break;
}
if (state->start > end)
break;
- if (state->state & bits) {
- bitset = 1;
- if (!filled)
- break;
- } else if (filled) {
- bitset = 0;
+ if ((state->state & bit) == 0) {
+ bitset = false;
break;
}
if (state->end == (u64)-1)
break;
+ /*
+ * Last entry (if state->end is (u64)-1 and overflow happens),
+ * or next entry starts after the range.
+ */
start = state->end + 1;
- if (start > end)
+ if (start > end || start == 0)
break;
state = next_state(state);
}
/* We ran out of states and were still inside of our range. */
- if (filled && !state)
- bitset = 0;
+ if (!state)
+ bitset = false;
spin_unlock(&tree->lock);
return bitset;
}
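The split leaves two clearly named predicates: test_range_bit_exists() asks
whether @bit is set anywhere in [start, end], while test_range_bit() asks
whether @bit covers the entire range. An illustrative helper only, with a
made-up name and policy (the real call sites are in the extent_io.c hunks
further down):

	static bool example_can_touch_range(struct extent_io_tree *tree,
					    u64 start, u64 end,
					    struct extent_state *cached)
	{
		/* A single locked state anywhere in the range blocks us. */
		if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED))
			return false;

		/* Proceed only if every byte of the range is delalloc. */
		return test_range_bit(tree, start, end, EXTENT_DELALLOC, cached);
	}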
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index 28c23a23d121..5602b0137fcd 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -131,8 +131,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
struct extent_state **cached_state);
void free_extent_state(struct extent_state *state);
-int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
- u32 bits, int filled, struct extent_state *cached_state);
+bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
+ struct extent_state *cached_state);
+bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -192,7 +193,5 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
u64 *end, u64 max_bytes,
struct extent_state **cached_state);
-void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
- struct extent_state **cached_state);
#endif /* BTRFS_EXTENT_IO_TREE_H */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index fc313fce5bbd..c8e5b4715b49 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -42,14 +42,16 @@
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"
+#include "raid-stripe-tree.h"
#undef SCRAMBLE_DELAYED_REFS
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
+ u64 owner_offset,
struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
@@ -57,7 +59,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
- struct btrfs_key *ins, int ref_mod);
+ struct btrfs_key *ins, int ref_mod, u64 oref_root);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op);
@@ -344,9 +346,15 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
struct btrfs_extent_inline_ref *iref,
enum btrfs_inline_ref_type is_data)
{
+ struct btrfs_fs_info *fs_info = eb->fs_info;
int type = btrfs_extent_inline_ref_type(eb, iref);
u64 offset = btrfs_extent_inline_ref_offset(eb, iref);
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ return type;
+ }
+
if (type == BTRFS_TREE_BLOCK_REF_KEY ||
type == BTRFS_SHARED_BLOCK_REF_KEY ||
type == BTRFS_SHARED_DATA_REF_KEY ||
@@ -355,26 +363,25 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_TREE_BLOCK_REF_KEY)
return type;
if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
- ASSERT(eb->fs_info);
+ ASSERT(fs_info);
/*
* Every shared one has parent tree block,
* which must be aligned to sector size.
*/
- if (offset &&
- IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ if (offset && IS_ALIGNED(offset, fs_info->sectorsize))
return type;
}
} else if (is_data == BTRFS_REF_TYPE_DATA) {
if (type == BTRFS_EXTENT_DATA_REF_KEY)
return type;
if (type == BTRFS_SHARED_DATA_REF_KEY) {
- ASSERT(eb->fs_info);
+ ASSERT(fs_info);
/*
* Every shared one has parent tree block,
* which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ IS_ALIGNED(offset, fs_info->sectorsize))
return type;
}
} else {
@@ -385,7 +392,7 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
WARN_ON(1);
btrfs_print_leaf(eb);
- btrfs_err(eb->fs_info,
+ btrfs_err(fs_info,
"eb %llu iref 0x%lx invalid extent inline ref type %d",
eb->start, (unsigned long)iref, type);
@@ -399,11 +406,11 @@ u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
__le64 lenum;
lenum = cpu_to_le64(root_objectid);
- high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
+ high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(owner);
- low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+ low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
lenum = cpu_to_le64(offset);
- low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
+ low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
return ((u64)high_crc << 31) ^ (u64)low_crc;
}
@@ -575,7 +582,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
}
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
fail:
btrfs_release_path(path);
@@ -623,7 +630,7 @@ static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
return ret;
}
@@ -789,7 +796,6 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
int type;
int want;
int ret;
- int err = 0;
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
int needed;
@@ -816,10 +822,8 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
again:
ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
/*
* We may be a newly converted file system which still has the old fat
@@ -846,7 +850,7 @@ again:
}
if (ret && !insert) {
- err = -ENOENT;
+ ret = -ENOENT;
goto out;
} else if (WARN_ON(ret)) {
btrfs_print_leaf(path->nodes[0]);
@@ -854,18 +858,18 @@ again:
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
bytenr, num_bytes, parent, root_objectid, owner,
offset);
- err = -EIO;
+ ret = -EUCLEAN;
goto out;
}
leaf = path->nodes[0];
item_size = btrfs_item_size(leaf, path->slots[0]);
if (unlikely(item_size < sizeof(*ei))) {
- err = -EUCLEAN;
+ ret = -EUCLEAN;
btrfs_err(fs_info,
"unexpected extent item size, has %llu expect >= %zu",
item_size, sizeof(*ei));
- btrfs_abort_transaction(trans, err);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -885,22 +889,17 @@ again:
else
needed = BTRFS_REF_TYPE_BLOCK;
- err = -ENOENT;
- while (1) {
- if (ptr >= end) {
- if (ptr > end) {
- err = -EUCLEAN;
- btrfs_print_leaf(path->nodes[0]);
- btrfs_crit(fs_info,
-"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
- path->slots[0], root_objectid, owner, offset, parent);
- }
- break;
- }
+ ret = -ENOENT;
+ while (ptr < end) {
iref = (struct btrfs_extent_inline_ref *)ptr;
type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ ptr += btrfs_extent_inline_ref_size(type);
+ continue;
+ }
if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EUCLEAN;
+ ret = -EUCLEAN;
goto out;
}
@@ -916,7 +915,7 @@ again:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (match_extent_data_ref(leaf, dref, root_objectid,
owner, offset)) {
- err = 0;
+ ret = 0;
break;
}
if (hash_extent_data_ref_item(leaf, dref) <
@@ -927,14 +926,14 @@ again:
ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
if (parent > 0) {
if (parent == ref_offset) {
- err = 0;
+ ret = 0;
break;
}
if (ref_offset < parent)
break;
} else {
if (root_objectid == ref_offset) {
- err = 0;
+ ret = 0;
break;
}
if (ref_offset < root_objectid)
@@ -943,10 +942,20 @@ again:
}
ptr += btrfs_extent_inline_ref_size(type);
}
- if (err == -ENOENT && insert) {
+
+ if (unlikely(ptr > end)) {
+ ret = -EUCLEAN;
+ btrfs_print_leaf(path->nodes[0]);
+ btrfs_crit(fs_info,
+"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
+ path->slots[0], root_objectid, owner, offset, parent);
+ goto out;
+ }
+
+ if (ret == -ENOENT && insert) {
if (item_size + extra_size >=
BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out;
}
/*
@@ -958,7 +967,7 @@ again:
if (find_next_key(path, 0, &key) == 0 &&
key.objectid == bytenr &&
key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out;
}
}
@@ -969,14 +978,14 @@ out:
path->search_for_extension = 0;
btrfs_unlock_up_safe(path, 1);
}
- return err;
+ return ret;
}
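Restructuring the loop from while (1) to while (ptr < end) lets the
corruption check run once after the walk: a well-formed item finishes with
ptr == end, so ptr > end can only mean the last inline ref overran the item.
Skeleton of the walk, eliding the tree block info offset that the full
function handles for metadata:

	ptr = (unsigned long)(ei + 1);		/* first inline ref */
	end = (unsigned long)ei + item_size;
	ret = -ENOENT;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		/* ... match the ref, skip owner refs, or keep scanning ... */
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (unlikely(ptr > end))
		ret = -EUCLEAN;			/* inline refs overran the item */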
/*
* helper to add new inline back ref
*/
static noinline_for_stack
-void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
+void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
u64 parent, u64 root_objectid,
@@ -999,7 +1008,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
type = extent_ref_type(parent, owner);
size = btrfs_extent_inline_ref_size(type);
- btrfs_extend_item(path, size);
+ btrfs_extend_item(trans, path, size);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
refs = btrfs_extent_refs(leaf, ei);
@@ -1033,7 +1042,7 @@ void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
} else {
btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
@@ -1066,7 +1075,9 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
/*
* helper to update/remove inline back ref
*/
-static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
+static noinline_for_stack int update_inline_extent_backref(
+ struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
struct btrfs_extent_inline_ref *iref,
int refs_to_mod,
struct btrfs_delayed_extent_op *extent_op)
@@ -1174,9 +1185,9 @@ static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *pa
memmove_extent_buffer(leaf, ptr, ptr + size,
end - ptr - size);
item_size -= size;
- btrfs_truncate_item(path, item_size, 1);
+ btrfs_truncate_item(trans, path, item_size, 1);
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
return 0;
}
@@ -1206,9 +1217,10 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
bytenr, num_bytes, root_objectid, path->slots[0]);
return -EUCLEAN;
}
- ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
+ ret = update_inline_extent_backref(trans, path, iref,
+ refs_to_add, extent_op);
} else if (ret == -ENOENT) {
- setup_inline_extent_backref(trans->fs_info, path, iref, parent,
+ setup_inline_extent_backref(trans, path, iref, parent,
root_objectid, owner, offset,
refs_to_add, extent_op);
ret = 0;
@@ -1226,7 +1238,8 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
BUG_ON(!is_data && refs_to_drop != 1);
if (iref)
- ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
+ ret = update_inline_extent_backref(trans, path, iref,
+ -refs_to_drop, NULL);
else if (is_data)
ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
else
@@ -1422,7 +1435,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
generic_ref->action);
BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
- generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);
+ generic_ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID);
if (generic_ref->type == BTRFS_REF_METADATA)
ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
@@ -1435,7 +1448,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
}
/*
- * __btrfs_inc_extent_ref - insert backreference for a given extent
+ * Insert backreference for a given extent.
*
* The counterpart is in __btrfs_free_extent(), with examples and more details
* how it works.
@@ -1465,8 +1478,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
* always passed as 0. For data extents it is the fileoffset
* this extent belongs to.
*
- * @refs_to_add Number of references to add
- *
* @extent_op Pointer to a structure, holding information necessary when
* updating a tree block's flags
*
@@ -1474,7 +1485,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
u64 parent, u64 root_objectid,
- u64 owner, u64 offset, int refs_to_add,
+ u64 owner, u64 offset,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_path *path;
@@ -1484,6 +1495,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
u64 refs;
+ int refs_to_add = node->ref_mod;
int ret;
path = btrfs_alloc_path();
@@ -1510,7 +1522,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
if (extent_op)
__run_delayed_extent_op(extent_op, leaf, item);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/* now insert the actual backref */
@@ -1530,44 +1542,57 @@ out:
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
bool insert_reserved)
{
int ret = 0;
struct btrfs_delayed_data_ref *ref;
- struct btrfs_key ins;
u64 parent = 0;
- u64 ref_root = 0;
u64 flags = 0;
- ins.objectid = node->bytenr;
- ins.offset = node->num_bytes;
- ins.type = BTRFS_EXTENT_ITEM_KEY;
-
ref = btrfs_delayed_node_to_data_ref(node);
trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
- ref_root = ref->root;
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
+ struct btrfs_key key;
+ struct btrfs_squota_delta delta = {
+ .root = href->owning_root,
+ .num_bytes = node->num_bytes,
+ .rsv_bytes = href->reserved_bytes,
+ .is_data = true,
+ .is_inc = true,
+ .generation = trans->transid,
+ };
+
if (extent_op)
flags |= extent_op->flags_to_set;
- ret = alloc_reserved_file_extent(trans, parent, ref_root,
+
+ key.objectid = node->bytenr;
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+ key.offset = node->num_bytes;
+
+ ret = alloc_reserved_file_extent(trans, parent, ref->root,
flags, ref->objectid,
- ref->offset, &ins,
- node->ref_mod);
+ ref->offset, &key,
+ node->ref_mod, href->owning_root);
+ if (!ret)
+ ret = btrfs_record_squota_delta(trans->fs_info, &delta);
+ else
+ btrfs_qgroup_free_refroot(trans->fs_info, delta.root,
+ delta.rsv_bytes, BTRFS_QGROUP_RSV_DATA);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
+ ret = __btrfs_inc_extent_ref(trans, node, parent, ref->root,
ref->objectid, ref->offset,
- node->ref_mod, extent_op);
+ extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, node, parent,
- ref_root, ref->objectid,
- ref->offset, node->ref_mod,
- extent_op);
+ ret = __btrfs_free_extent(trans, href, node, parent,
+ ref->root, ref->objectid,
+ ref->offset, extent_op);
} else {
BUG();
}
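The btrfs_squota_delta is simple quotas' unit of accounting: a plain
description of one allocation or free charged to the owning root. Condensed
from the hunk above, the record/rollback pairing around the reserved insert
is:

	ret = alloc_reserved_file_extent(trans, parent, ref->root, flags,
					 ref->objectid, ref->offset, &key,
					 node->ref_mod, href->owning_root);
	if (!ret)
		ret = btrfs_record_squota_delta(trans->fs_info, &delta);
	else	/* never inserted, hand the data reservation back */
		btrfs_qgroup_free_refroot(trans->fs_info, delta.root,
					  delta.rsv_bytes, BTRFS_QGROUP_RSV_DATA);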
@@ -1604,7 +1629,6 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
u32 item_size;
int ret;
- int err = 0;
int metadata = 1;
if (TRANS_ABORTED(trans))
@@ -1631,10 +1655,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
again:
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0) {
- err = ret;
goto out;
- }
- if (ret > 0) {
+ } else if (ret > 0) {
if (metadata) {
if (path->slots[0] > 0) {
path->slots[0]--;
@@ -1655,7 +1677,7 @@ again:
goto again;
}
} else {
- err = -EUCLEAN;
+ ret = -EUCLEAN;
btrfs_err(fs_info,
"missing extent item for extent %llu num_bytes %llu level %d",
head->bytenr, head->num_bytes, extent_op->level);
@@ -1667,29 +1689,31 @@ again:
item_size = btrfs_item_size(leaf, path->slots[0]);
if (unlikely(item_size < sizeof(*ei))) {
- err = -EUCLEAN;
+ ret = -EUCLEAN;
btrfs_err(fs_info,
"unexpected extent item size, has %u expect >= %zu",
item_size, sizeof(*ei));
- btrfs_abort_transaction(trans, err);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
__run_delayed_extent_op(extent_op, leaf, ei);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
- return err;
+ return ret;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
bool insert_reserved)
{
int ret = 0;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_tree_ref *ref;
u64 parent = 0;
u64 ref_root = 0;
@@ -1709,14 +1733,25 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
return -EUCLEAN;
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
+ struct btrfs_squota_delta delta = {
+ .root = href->owning_root,
+ .num_bytes = fs_info->nodesize,
+ .rsv_bytes = 0,
+ .is_data = false,
+ .is_inc = true,
+ .generation = trans->transid,
+ };
+
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, node, extent_op);
+ if (!ret)
+ btrfs_record_squota_delta(fs_info, &delta);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
- ref->level, 0, 1, extent_op);
+ ref->level, 0, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, node, parent, ref_root,
- ref->level, 0, 1, extent_op);
+ ret = __btrfs_free_extent(trans, href, node, parent, ref_root,
+ ref->level, 0, extent_op);
} else {
BUG();
}
@@ -1725,6 +1760,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op,
bool insert_reserved)
@@ -1739,12 +1775,14 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = run_delayed_tree_ref(trans, node, extent_op,
+ ret = run_delayed_tree_ref(trans, href, node, extent_op,
insert_reserved);
else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
node->type == BTRFS_SHARED_DATA_REF_KEY)
- ret = run_delayed_data_ref(trans, node, extent_op,
+ ret = run_delayed_data_ref(trans, href, node, extent_op,
insert_reserved);
+ else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY)
+ ret = 0;
else
BUG();
if (ret && insert_reserved)
@@ -1823,28 +1861,37 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
return ret ? ret : 1;
}
-void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head)
{
- int nr_items = 1; /* Dropping this ref head update. */
-
/*
* We had csum deletions accounted for in our delayed refs rsv, we need
* to drop the csum leaves for this update from our delayed_refs_rsv.
*/
if (head->total_ref_mod < 0 && head->is_data) {
+ int nr_csums;
+
spin_lock(&delayed_refs->lock);
delayed_refs->pending_csums -= head->num_bytes;
spin_unlock(&delayed_refs->lock);
- nr_items += btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
+ nr_csums = btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);
+
+ btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);
+
+ return btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
}
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE &&
+ head->must_insert_reserved && head->is_data)
+ btrfs_qgroup_free_refroot(fs_info, head->owning_root,
+ head->reserved_bytes, BTRFS_QGROUP_RSV_DATA);
- btrfs_delayed_refs_rsv_release(fs_info, nr_items);
+ return 0;
}
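The changed return type tells the caller how many reserved bytes this head's
accounting released, feeding the new byte-based throttling. The caller-side
contract, as used by cleanup_ref_head() below:

	u64 released;

	released = btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
	/* non-zero only when csum reservations were dropped for a data head */
	*bytes_released += released;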
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head)
+ struct btrfs_delayed_ref_head *head,
+ u64 *bytes_released)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -1889,7 +1936,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
}
}
- btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
+ *bytes_released += btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
@@ -1931,7 +1978,8 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
}
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *locked_ref)
+ struct btrfs_delayed_ref_head *locked_ref,
+ u64 *bytes_released)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -1985,8 +2033,10 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
locked_ref->extent_op = NULL;
spin_unlock(&locked_ref->lock);
- ret = run_one_delayed_ref(trans, ref, extent_op,
+ ret = run_one_delayed_ref(trans, locked_ref, ref, extent_op,
must_insert_reserved);
+ btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
+ *bytes_released += btrfs_calc_delayed_ref_bytes(fs_info, 1);
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
@@ -2010,15 +2060,22 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
*/
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long nr)
+ u64 min_bytes)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *locked_ref = NULL;
int ret;
unsigned long count = 0;
+ unsigned long max_count = 0;
+ u64 bytes_processed = 0;
delayed_refs = &trans->transaction->delayed_refs;
+ if (min_bytes == 0) {
+ max_count = delayed_refs->num_heads_ready;
+ min_bytes = U64_MAX;
+ }
+
do {
if (!locked_ref) {
locked_ref = btrfs_obtain_ref_head(trans);
@@ -2046,7 +2103,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_lock(&locked_ref->lock);
btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
- ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
+ ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &bytes_processed);
if (ret < 0 && ret != -EAGAIN) {
/*
* Error, btrfs_run_delayed_refs_for_head already
@@ -2058,7 +2115,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
* Success, perform the usual cleanup of a processed
* head
*/
- ret = cleanup_ref_head(trans, locked_ref);
+ ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
 			if (ret > 0) {
/* We dropped our lock, we need to loop. */
ret = 0;
@@ -2075,7 +2132,9 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
locked_ref = NULL;
cond_resched();
- } while ((nr != -1 && count < nr) || locked_ref);
+ } while ((min_bytes != U64_MAX && bytes_processed < min_bytes) ||
+ (max_count > 0 && count < max_count) ||
+ locked_ref);
return 0;
}
@@ -2124,24 +2183,25 @@ static u64 find_middle(struct rb_root *root)
#endif
/*
- * this starts processing the delayed reference count updates and
- * extent insertions we have queued up so far. count can be
- * 0, which means to process everything in the tree at the start
- * of the run (but not newly added entries), or it can be some target
- * number you'd like to process.
+ * Start processing the delayed reference count updates and extent insertions
+ * we have queued up so far.
+ *
+ * @trans: Transaction handle.
+ * @min_bytes: How many bytes of delayed references to process. After this
+ * many bytes we stop processing delayed references if there are
+ *              any more. If 0, run all existing delayed references, but not
+ *              new ones added after running all existing ones.
+ * Use (u64)-1 (U64_MAX) to run all existing delayed references
+ * plus any new ones that are added.
*
* Returns 0 on success or if called with an aborted transaction
* Returns <0 on error and aborts the transaction
*/
-int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
- unsigned long count)
+int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_head *head;
int ret;
- int run_all = count == (unsigned long)-1;
/* We'll clean this up in btrfs_cleanup_transaction */
if (TRANS_ABORTED(trans))
@@ -2151,42 +2211,30 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
return 0;
delayed_refs = &trans->transaction->delayed_refs;
- if (count == 0)
- count = delayed_refs->num_heads_ready;
-
again:
#ifdef SCRAMBLE_DELAYED_REFS
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
- ret = __btrfs_run_delayed_refs(trans, count);
+ ret = __btrfs_run_delayed_refs(trans, min_bytes);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
return ret;
}
- if (run_all) {
+ if (min_bytes == U64_MAX) {
btrfs_create_pending_block_groups(trans);
spin_lock(&delayed_refs->lock);
- node = rb_first_cached(&delayed_refs->href_root);
- if (!node) {
+ if (RB_EMPTY_ROOT(&delayed_refs->href_root.rb_root)) {
spin_unlock(&delayed_refs->lock);
- goto out;
+ return 0;
}
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
- refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
- /* Mutex was contended, block until it's released and retry. */
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
-
- btrfs_put_delayed_ref_head(head);
cond_resched();
goto again;
}
-out:
+
return 0;
}
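Call sites now size the work in bytes of reserved delayed ref space rather
than head counts. Under the semantics documented above, the three modes look
like this (illustrative calls, not taken from this diff):

	/* Run only what is queued right now, nothing added meanwhile. */
	ret = btrfs_run_delayed_refs(trans, 0);

	/* Run everything, including refs queued while we run. */
	ret = btrfs_run_delayed_refs(trans, U64_MAX);

	/* Throttle: stop once roughly min_bytes worth has been processed. */
	ret = btrfs_run_delayed_refs(trans, min_bytes);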
@@ -2311,6 +2359,7 @@ static noinline int check_committed_ref(struct btrfs_root *root,
struct btrfs_extent_item *ei;
struct btrfs_key key;
u32 item_size;
+ u32 expected_size;
int type;
int ret;
@@ -2337,10 +2386,22 @@ static noinline int check_committed_ref(struct btrfs_root *root,
ret = 1;
item_size = btrfs_item_size(leaf, path->slots[0]);
ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
+ expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY);
+
+ /* No inline refs; we need to bail before checking for owner ref. */
+ if (item_size == sizeof(*ei))
+ goto out;
+
+ /* Check for an owner ref; skip over it to the real inline refs. */
+ iref = (struct btrfs_extent_inline_ref *)(ei + 1);
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
+ if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
+ iref = (struct btrfs_extent_inline_ref *)(iref + 1);
+ }
/* If extent item has more than 1 inline ref then it's shared */
- if (item_size != sizeof(*ei) +
- btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
+ if (item_size != expected_size)
goto out;
/*
@@ -2352,8 +2413,6 @@ static noinline int check_committed_ref(struct btrfs_root *root,
btrfs_root_last_snapshot(&root->root_item)))
goto out;
- iref = (struct btrfs_extent_inline_ref *)(ei + 1);
-
/* If this extent has SHARED_DATA_REF then it's shared */
type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
if (type != BTRFS_EXTENT_DATA_REF_KEY)
@@ -2450,7 +2509,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
btrfs_init_generic_ref(&generic_ref, action, bytenr,
- num_bytes, parent);
+ num_bytes, parent, ref_root);
btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
key.offset, root->root_key.objectid,
for_reloc);
@@ -2463,8 +2522,9 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
} else {
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = fs_info->nodesize;
+ /* We don't know the owning_root, use 0. */
btrfs_init_generic_ref(&generic_ref, action, bytenr,
- num_bytes, parent);
+ num_bytes, parent, 0);
btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
root->root_key.objectid, for_reloc);
if (inc)
@@ -2565,16 +2625,13 @@ int btrfs_pin_extent(struct btrfs_trans_handle *trans,
return 0;
}
-/*
- * this function must be called within transaction
- */
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes)
+ const struct extent_buffer *eb)
{
struct btrfs_block_group *cache;
int ret;
- cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
+ cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
if (!cache)
return -EINVAL;
@@ -2586,10 +2643,10 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- pin_down_extent(trans, cache, bytenr, num_bytes, 0);
+ pin_down_extent(trans, cache, eb->start, eb->len, 0);
/* remove us from the free space cache (if we're there at all) */
- ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
+ ret = btrfs_remove_free_space(cache, eb->start, eb->len);
out:
btrfs_put_block_group(cache);
return ret;
@@ -2844,12 +2901,61 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
return 0;
}
+/*
+ * Parse an extent item's inline extents looking for a simple quotas owner ref.
+ *
+ * @fs_info: the btrfs_fs_info for this mount
+ * @leaf: a leaf in the extent tree containing the extent item
+ * @slot: the slot in the leaf where the extent item is found
+ *
+ * Returns the objectid of the root that originally allocated the extent item
+ * if the inline owner ref is expected and present, otherwise 0.
+ *
+ * If an extent item has an owner ref item, it will be the first inline ref
+ * item. Therefore the logic is to check whether there are any inline ref
+ * items, then check the type of the first one.
+ */
+u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf, int slot)
+{
+ struct btrfs_extent_item *ei;
+ struct btrfs_extent_inline_ref *iref;
+ struct btrfs_extent_owner_ref *oref;
+ unsigned long ptr;
+ unsigned long end;
+ int type;
+
+ if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA))
+ return 0;
+
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+ ptr = (unsigned long)(ei + 1);
+ end = (unsigned long)ei + btrfs_item_size(leaf, slot);
+
+ /* No inline ref items of any kind, can't check type. */
+ if (ptr == end)
+ return 0;
+
+ iref = (struct btrfs_extent_inline_ref *)ptr;
+ type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
+
+ /* We found an owner ref, get the root out of it. */
+ if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
+ oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
+ return btrfs_extent_owner_ref_root_id(leaf, oref);
+ }
+
+ /* We have inline refs, but not an owner ref. */
+ return 0;
+}
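The first-inline-ref probe works because the owner ref, when present, is
always written ahead of the real backrefs; alloc_reserved_file_extent()
further down emits it in exactly that order. Schematically (a layout sketch,
not literal on-disk structs):

	/*
	 * [ struct btrfs_extent_item                  ]
	 * [ inline ref: BTRFS_EXTENT_OWNER_REF_KEY    ]  <- simple quotas only
	 * [ inline ref: data or shared backref(s) ... ]
	 */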
+
static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes, bool is_data)
+ u64 bytenr, struct btrfs_squota_delta *delta)
{
int ret;
+ u64 num_bytes = delta->num_bytes;
- if (is_data) {
+ if (delta->is_data) {
struct btrfs_root *csum_root;
csum_root = btrfs_csum_root(trans->fs_info, bytenr);
@@ -2858,6 +2964,18 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, ret);
return ret;
}
+
+ ret = btrfs_delete_raid_extent(trans, bytenr, num_bytes);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
+ }
+ }
+
+ ret = btrfs_record_squota_delta(trans->fs_info, delta);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ return ret;
}
ret = add_to_free_space_tree(trans, bytenr, num_bytes);
@@ -2940,9 +3058,10 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
* And that (13631488 EXTENT_DATA_REF <HASH>) gets removed.
*/
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
- u64 owner_offset, int refs_to_drop,
+ u64 owner_offset,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *info = trans->fs_info;
@@ -2957,11 +3076,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
int extent_slot = 0;
int found_extent = 0;
int num_to_del = 1;
+ int refs_to_drop = node->ref_mod;
u32 item_size;
u64 refs;
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
+ u64 delayed_ref_root = href->owning_root;
extent_root = btrfs_extent_root(info, bytenr);
ASSERT(extent_root);
@@ -3151,7 +3272,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
} else {
btrfs_set_extent_refs(leaf, ei, refs);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
if (found_extent) {
ret = remove_extent_backref(trans, extent_root, path,
@@ -3162,6 +3283,15 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
}
} else {
+ struct btrfs_squota_delta delta = {
+ .root = delayed_ref_root,
+ .num_bytes = num_bytes,
+ .rsv_bytes = 0,
+ .is_data = is_data,
+ .is_inc = false,
+ .generation = btrfs_extent_generation(leaf, ei),
+ };
+
/* In this branch refs == 1 */
if (found_extent) {
if (is_data && refs_to_drop !=
@@ -3200,6 +3330,16 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
num_to_del = 2;
}
}
+ /*
+ * We can't infer the data owner from the delayed ref, so we need
+ * to try to get it from the owning ref item.
+ *
+ * If it is not present, then that extent was not written under
+ * simple quotas mode, so we don't need to account for its deletion.
+ */
+ if (is_data)
+ delta.root = btrfs_get_extent_owner_root(trans->fs_info,
+ leaf, extent_slot);
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
@@ -3209,7 +3349,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- ret = do_free_extent_accounting(trans, bytenr, num_bytes, is_data);
+ ret = do_free_extent_accounting(trans, bytenr, &delta);
}
btrfs_release_path(path);
@@ -3283,7 +3423,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
int ret;
btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
- buf->start, buf->len, parent);
+ buf->start, buf->len, parent, btrfs_header_owner(buf));
btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
root_id, 0, false);
@@ -3370,10 +3510,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
* tree, just update pinning info and exit early.
*/
if ((ref->type == BTRFS_REF_METADATA &&
- ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
+ ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) ||
(ref->type == BTRFS_REF_DATA &&
- ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) {
- /* unlocks the pinned mutex */
+ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
ret = 0;
} else if (ref->type == BTRFS_REF_METADATA) {
@@ -3383,9 +3522,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
}
if (!((ref->type == BTRFS_REF_METADATA &&
- ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
+ ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) ||
(ref->type == BTRFS_REF_DATA &&
- ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)))
+ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
btrfs_ref_tree_mod(fs_info, ref);
return ret;
@@ -4442,8 +4581,8 @@ loop:
}
/*
- * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
- * hole that is at least as big as @num_bytes.
+ * Entry point to the extent allocator. Tries to find a hole that is at least
+ * as big as @num_bytes.
*
* @root - The root that will contain this extent
*
@@ -4562,20 +4701,20 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
- u64 len)
+int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
+ const struct extent_buffer *eb)
{
struct btrfs_block_group *cache;
int ret = 0;
- cache = btrfs_lookup_block_group(trans->fs_info, start);
+ cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
if (!cache) {
btrfs_err(trans->fs_info, "unable to find block group for %llu",
- start);
+ eb->start);
return -ENOSPC;
}
- ret = pin_down_extent(trans, cache, start, len, 1);
+ ret = pin_down_extent(trans, cache, eb->start, eb->len, 1);
btrfs_put_block_group(cache);
return ret;
}
@@ -4605,24 +4744,29 @@ static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr,
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 parent, u64 root_objectid,
u64 flags, u64 owner, u64 offset,
- struct btrfs_key *ins, int ref_mod)
+ struct btrfs_key *ins, int ref_mod, u64 oref_root)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *extent_root;
int ret;
struct btrfs_extent_item *extent_item;
+ struct btrfs_extent_owner_ref *oref;
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
struct extent_buffer *leaf;
int type;
u32 size;
+ const bool simple_quota = (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE);
if (parent > 0)
type = BTRFS_SHARED_DATA_REF_KEY;
else
type = BTRFS_EXTENT_DATA_REF_KEY;
- size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
+ size = sizeof(*extent_item);
+ if (simple_quota)
+ size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY);
+ size += btrfs_extent_inline_ref_size(type);
path = btrfs_alloc_path();
if (!path)
@@ -4644,7 +4788,14 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
flags | BTRFS_EXTENT_FLAG_DATA);
iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
+ if (simple_quota) {
+ btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_EXTENT_OWNER_REF_KEY);
+ oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
+ btrfs_set_extent_owner_ref_root_id(leaf, oref, oref_root);
+ iref = (struct btrfs_extent_inline_ref *)(oref + 1);
+ }
btrfs_set_extent_inline_ref_type(leaf, iref, type);
+
if (parent > 0) {
struct btrfs_shared_data_ref *ref;
ref = (struct btrfs_shared_data_ref *)(iref + 1);
@@ -4659,7 +4810,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
}
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_free_path(path);
return alloc_reserved_extent(trans, ins->objectid, ins->offset);
@@ -4734,7 +4885,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
return alloc_reserved_extent(trans, node->bytenr, fs_info->nodesize);
@@ -4746,12 +4897,17 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_key *ins)
{
struct btrfs_ref generic_ref = { 0 };
+ u64 root_objectid = root->root_key.objectid;
+ u64 owning_root = root_objectid;
- BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
+
+ if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root))
+ owning_root = root->relocation_src_root;
btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
- ins->objectid, ins->offset, 0);
- btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner,
+ ins->objectid, ins->offset, 0, owning_root);
+ btrfs_init_data_ref(&generic_ref, root_objectid, owner,
offset, 0, false);
btrfs_ref_tree_mod(root->fs_info, &generic_ref);
@@ -4771,6 +4927,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_block_group *block_group;
struct btrfs_space_info *space_info;
+ struct btrfs_squota_delta delta = {
+ .root = root_objectid,
+ .num_bytes = ins->offset,
+ .generation = trans->transid,
+ .rsv_bytes = 0,
+ .is_data = true,
+ .is_inc = true,
+ };
/*
* Mixed block groups will exclude before processing the log so we only
@@ -4796,13 +4960,36 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
spin_unlock(&space_info->lock);
ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
- offset, ins, 1);
+ offset, ins, 1, root_objectid);
if (ret)
btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
+ ret = btrfs_record_squota_delta(fs_info, &delta);
btrfs_put_block_group(block_group);
return ret;
}
+#ifdef CONFIG_BTRFS_DEBUG
+/*
+ * Extra safety check in case the extent tree is corrupted and the extent
+ * allocator chooses to use a tree block which is already in use and locked.
+ */
+static bool check_eb_lock_owner(const struct extent_buffer *eb)
+{
+ if (eb->lock_owner == current->pid) {
+ btrfs_err_rl(eb->fs_info,
+"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
+ eb->start, btrfs_header_owner(eb), current->pid);
+ return true;
+ }
+ return false;
+}
+#else
+static bool check_eb_lock_owner(const struct extent_buffer *eb)
+{
+ return false;
+}
+#endif
+
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u64 bytenr, int level, u64 owner,
@@ -4816,15 +5003,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
if (IS_ERR(buf))
return buf;
- /*
- * Extra safety check in case the extent tree is corrupted and extent
- * allocator chooses to use a tree block which is already used and
- * locked.
- */
- if (buf->lock_owner == current->pid) {
- btrfs_err_rl(fs_info,
-"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
- buf->start, btrfs_header_owner(buf), current->pid);
+ if (check_eb_lock_owner(buf)) {
free_extent_buffer(buf);
return ERR_PTR(-EUCLEAN);
}
@@ -4901,6 +5080,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
const struct btrfs_disk_key *key,
int level, u64 hint,
u64 empty_size,
+ u64 reloc_src_root,
enum btrfs_lock_nesting nest)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4913,6 +5093,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
int ret;
u32 blocksize = fs_info->nodesize;
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
+ u64 owning_root;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
if (btrfs_is_testing(fs_info)) {
@@ -4939,11 +5120,13 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
ret = PTR_ERR(buf);
goto out_free_reserved;
}
+ owning_root = btrfs_header_owner(buf);
if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent == 0)
parent = ins.objectid;
flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
+ owning_root = reloc_src_root;
} else
BUG_ON(parent > 0);
@@ -4963,7 +5146,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op->level = level;
btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
- ins.objectid, ins.offset, parent);
+ ins.objectid, ins.offset, parent, owning_root);
btrfs_init_tree_ref(&generic_ref, level, root_objectid,
root->root_key.objectid, false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
@@ -5384,7 +5567,8 @@ skip:
find_next_key(path, level, &wc->drop_progress);
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- fs_info->nodesize, parent);
+ fs_info->nodesize, parent,
+ btrfs_header_owner(next));
btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid,
0, false);
ret = btrfs_free_extent(trans, &ref);
diff --git a/fs/btrfs/extent-tree.h b/fs/btrfs/extent-tree.h
index 88c249c37516..0716f65d9753 100644
--- a/fs/btrfs/extent-tree.h
+++ b/fs/btrfs/extent-tree.h
@@ -7,6 +7,7 @@
#include "block-group.h"
struct btrfs_free_cluster;
+struct btrfs_delayed_ref_head;
enum btrfs_extent_allocation_policy {
BTRFS_EXTENT_ALLOC_CLUSTERED,
@@ -91,8 +92,8 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
-int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count);
-void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
+int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes);
+u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
@@ -102,7 +103,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- u64 bytenr, u64 num_bytes);
+ const struct extent_buffer *eb);
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr, bool strict,
@@ -113,6 +114,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
const struct btrfs_disk_key *key,
int level, u64 hint,
u64 empty_size,
+ u64 reloc_src_root,
enum btrfs_lock_nesting nest);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
u64 root_id,
@@ -136,12 +138,15 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, u64 flags);
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
+u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf, int slot);
int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc);
-int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, u64 len);
+int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
+ const struct extent_buffer *eb);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref);
-int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
+int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index caccd0376342..03cef28d9e37 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -21,7 +21,6 @@
#include "ctree.h"
#include "btrfs_inode.h"
#include "bio.h"
-#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
@@ -395,7 +394,7 @@ again:
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
- EXTENT_DELALLOC, 1, cached_state);
+ EXTENT_DELALLOC, cached_state);
if (!ret) {
unlock_extent(tree, delalloc_start, delalloc_end,
&cached_state);
@@ -2294,7 +2293,7 @@ static int try_release_extent_state(struct extent_io_tree *tree,
u64 end = start + PAGE_SIZE - 1;
int ret = 1;
- if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
+ if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
ret = 0;
} else {
u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
@@ -2353,9 +2352,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
free_extent_map(em);
break;
}
- if (test_range_bit(tree, em->start,
- extent_map_end(em) - 1,
- EXTENT_LOCKED, 0, NULL))
+ if (test_range_bit_exists(tree, em->start,
+ extent_map_end(em) - 1,
+ EXTENT_LOCKED))
goto next;
/*
* If it's not in the list of modified extents, used
@@ -3455,6 +3454,12 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
start, fs_info->nodesize);
return -EINVAL;
}
+ if (!IS_ALIGNED(start, fs_info->nodesize) &&
+ !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
+ btrfs_warn(fs_info,
+"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
+ start, fs_info->nodesize);
+ }
return 0;
}
@@ -4248,14 +4253,14 @@ void copy_extent_buffer(const struct extent_buffer *dst,
}
/*
- * eb_bitmap_offset() - calculate the page and offset of the byte containing the
- * given bit number
- * @eb: the extent buffer
- * @start: offset of the bitmap item in the extent buffer
- * @nr: bit number
- * @page_index: return index of the page in the extent buffer that contains the
- * given bit number
- * @page_offset: return offset into the page given by page_index
+ * Calculate the page and offset of the byte containing the given bit number.
+ *
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number
+ * @page_index: return index of the page in the extent buffer that contains
+ * the given bit number
+ * @page_offset: return offset into the page given by page_index
*
* This helper hides the ugliness of finding the byte in an extent buffer which
* contains a given bit.
@@ -4614,7 +4619,8 @@ int try_release_extent_buffer(struct page *page)
}
/*
- * btrfs_readahead_tree_block - attempt to readahead a child block
+ * Attempt to readahead a child block.
+ *
* @fs_info: the fs_info
* @bytenr: bytenr to read
* @owner_root: objectid of the root that owns this eb
@@ -4653,7 +4659,8 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
}
/*
- * btrfs_readahead_node_child - readahead a node's child block
+ * Readahead a node's child block.
+ *
* @node: parent node we're reading from
* @slot: slot in the parent node for the child we want to read
*
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 68368ba99321..2171057a4477 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -80,16 +80,16 @@ struct extent_buffer {
spinlock_t refs_lock;
atomic_t refs;
int read_mirror;
- struct rcu_head rcu_head;
- pid_t lock_owner;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
s8 log_index;
+ struct rcu_head rcu_head;
struct rw_semaphore lock;
struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
+ pid_t lock_owner;
#endif
};
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 1ce5dd154499..45cae356e89b 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_encryption(leaf, item, 0);
btrfs_set_file_extent_other_encoding(leaf, item, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
* This calls btrfs_truncate_item with the correct args based on the overlap,
* and fixes up the key as required.
*/
-static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
+static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct extent_buffer *leaf;
const u32 csum_size = fs_info->csum_size;
u64 csum_end;
@@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(path, new_size, 1);
+ btrfs_truncate_item(trans, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
- btrfs_truncate_item(path, new_size, 0);
+ btrfs_truncate_item(trans, path, new_size, 0);
key->offset = end_byte;
- btrfs_set_item_key_safe(fs_info, path, key);
+ btrfs_set_item_key_safe(trans, path, key);
} else {
BUG();
}
@@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
} else {
- truncate_one_csum(fs_info, path, &key, bytenr, len);
+ truncate_one_csum(trans, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
@@ -1202,7 +1203,7 @@ extend_csum:
diff /= csum_size;
diff *= csum_size;
- btrfs_extend_item(path, diff);
+ btrfs_extend_item(trans, path, diff);
ret = 0;
goto csum;
}
@@ -1249,7 +1250,7 @@ found:
ins_size /= csum_size;
total_bytes += ins_size * fs_info->sectorsize;
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
cond_resched();
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 361535c71c0f..f47731c45bb5 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -17,6 +17,7 @@
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
+#include <linux/iomap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -368,12 +369,13 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->start);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0) {
btrfs_init_generic_ref(&ref,
BTRFS_ADD_DELAYED_REF,
- disk_bytenr, num_bytes, 0);
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid);
btrfs_init_data_ref(&ref,
root->root_key.objectid,
new_key.objectid,
@@ -405,13 +407,13 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = args->end;
- btrfs_set_item_key_safe(fs_info, path, &new_key);
+ btrfs_set_item_key_safe(trans, path, &new_key);
extent_offset += args->end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->end);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += args->end - key.offset;
break;
@@ -431,7 +433,7 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
args->start - key.offset);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += extent_end - args->start;
if (args->end == extent_end)
@@ -463,7 +465,8 @@ delete_extent_item:
} else if (update_refs && disk_bytenr > 0) {
btrfs_init_generic_ref(&ref,
BTRFS_DROP_DELAYED_REF,
- disk_bytenr, num_bytes, 0);
+ disk_bytenr, num_bytes, 0,
+ root->root_key.objectid);
btrfs_init_data_ref(&ref,
root->root_key.objectid,
key.objectid,
@@ -536,7 +539,8 @@ delete_extent_item:
if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
path->slots[0]++;
}
- btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
+ btrfs_setup_item_for_insert(trans, root, path, &key,
+ args->extent_item_size);
args->extent_inserted = true;
}
@@ -593,7 +597,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 start, u64 end)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
@@ -664,7 +667,7 @@ again:
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
- btrfs_set_item_key_safe(fs_info, path, &new_key);
+ btrfs_set_item_key_safe(trans, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
@@ -679,7 +682,7 @@ again:
trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
end - other_start);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@@ -698,7 +701,7 @@ again:
trans->transid);
path->slots[0]++;
new_key.offset = start;
- btrfs_set_item_key_safe(fs_info, path, &new_key);
+ btrfs_set_item_key_safe(trans, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@@ -708,7 +711,7 @@ again:
other_end - start);
btrfs_set_file_extent_offset(leaf, fi,
start - orig_offset);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@@ -742,10 +745,10 @@ again:
btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - split);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
- num_bytes, 0);
+ num_bytes, 0, root->root_key.objectid);
btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
orig_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
@@ -771,7 +774,7 @@ again:
other_start = end;
other_end = 0;
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, 0);
+ num_bytes, 0, root->root_key.objectid);
btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
0, false);
if (extent_mergeable(leaf, path->slots[0] + 1,
@@ -814,7 +817,7 @@ again:
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
} else {
fi = btrfs_item_ptr(leaf, del_slot - 1,
struct btrfs_file_extent_item);
@@ -823,7 +826,7 @@ again:
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - key.offset);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret < 0) {
@@ -1108,17 +1111,18 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
static void update_time_for_write(struct inode *inode)
{
- struct timespec64 now, ctime;
+ struct timespec64 now, ts;
if (IS_NOCMTIME(inode))
return;
now = current_time(inode);
- if (!timespec64_equal(&inode->i_mtime, &now))
- inode->i_mtime = now;
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(&ts, &now))
+ inode_set_mtime_to_ts(inode, now);
- ctime = inode_get_ctime(inode);
- if (!timespec64_equal(&ctime, &now))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(&ts, &now))
inode_set_ctime_to_ts(inode, now);
if (IS_I_VERSION(inode))
@@ -1746,7 +1750,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
struct btrfs_inode *inode = BTRFS_I(ctx->inode);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- if (btrfs_inode_in_log(inode, fs_info->generation) &&
+ if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
list_empty(&ctx->ordered_extents))
return true;
@@ -1757,7 +1761,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
* and for a fast fsync we don't wait for that, we only wait for the
* writeback to complete.
*/
- if (inode->last_trans <= fs_info->last_trans_committed &&
+ if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
(test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
list_empty(&ctx->ordered_extents)))
return true;
@@ -1886,7 +1890,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch);
- smp_mb();
if (skip_inode_logging(&ctx)) {
/*
* We've had everything committed since the last time we were
@@ -2104,7 +2107,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
@@ -2112,7 +2115,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
u64 num_bytes;
key.offset = offset;
- btrfs_set_item_key_safe(fs_info, path, &key);
+ btrfs_set_item_key_safe(trans, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@ -2121,7 +2124,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
btrfs_release_path(path);
@@ -2273,7 +2276,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
if (extent_info->is_new_extent)
btrfs_set_file_extent_generation(leaf, extent, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
@@ -2303,7 +2306,8 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
extent_info->disk_offset,
- extent_info->disk_len, 0);
+ extent_info->disk_len, 0,
+ root->root_key.objectid);
ref_offset = extent_info->file_offset - extent_info->data_offset;
btrfs_init_data_ref(&ref, root->root_key.objectid,
btrfs_ino(inode), ref_offset, 0, false);
@@ -2473,9 +2477,10 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
inode_inc_iversion(&inode->vfs_inode);
if (!extent_info || extent_info->update_times)
- inode->vfs_inode.i_mtime = inode_set_ctime_current(&inode->vfs_inode);
+ inode_set_mtime_to_ts(&inode->vfs_inode,
+ inode_set_ctime_current(&inode->vfs_inode));
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret)
break;
@@ -2714,8 +2719,8 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
@@ -2734,14 +2739,14 @@ out_only_mutex:
struct timespec64 now = inode_set_ctime_current(inode);
inode_inc_iversion(inode);
- inode->i_mtime = now;
+ inode_set_mtime_to_ts(inode, now);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
} else {
int ret2;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
if (!ret)
ret = ret2;
@@ -2808,7 +2813,7 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
inode_set_ctime_current(inode);
i_size_write(inode, end);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
return ret ? ret : ret2;
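
The timestamp churn in this file follows one conversion rule: direct accesses to
inode->i_mtime are replaced by VFS accessor calls. A condensed sketch of the
read-compare-write idiom used in update_time_for_write() above (all functions
shown are the in-tree accessors):

	static void touch_mtime_if_stale(struct inode *inode)
	{
		struct timespec64 now = current_time(inode);
		struct timespec64 ts = inode_get_mtime(inode);

		/* Only dirty the inode when the cached mtime is stale. */
		if (!timespec64_equal(&ts, &now))
			inode_set_mtime_to_ts(inode, now);
	}

The accessors let the VFS change how timestamps are stored internally without
touching every filesystem again.
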
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 27fad70451aa..6f93c9a2c3e3 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -57,6 +57,11 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes, bool update_stats);
+static void btrfs_crc32c_final(u32 crc, u8 *result)
+{
+ put_unaligned_le32(~crc, result);
+}
+
static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
struct btrfs_free_space *info;
@@ -195,7 +200,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -213,7 +218,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@@ -354,7 +359,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
if (ret)
goto fail;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
fail:
if (locked)
@@ -540,7 +545,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
- crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
+ crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
btrfs_crc32c_final(crc, (u8 *)&crc);
io_ctl_unmap_page(io_ctl);
tmp = page_address(io_ctl->pages[0]);
@@ -562,7 +567,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
val = *tmp;
io_ctl_map_page(io_ctl, 0);
- crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
+ crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
btrfs_crc32c_final(crc, (u8 *)&crc);
if (val != crc) {
btrfs_err_rl(io_ctl->fs_info,
@@ -1185,7 +1190,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_set_free_space_entries(leaf, header, entries);
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@@ -1321,7 +1326,7 @@ out:
"failed to write free space cache for block group %llu error %d",
block_group->start, ret);
}
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ btrfs_update_inode(trans, BTRFS_I(inode));
if (block_group) {
/* the dirty list is protected by the dirty_bgs_lock */
@@ -1362,7 +1367,6 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
/*
* Write out cached info to an inode.
*
- * @root: root the inode belongs to
* @inode: freespace inode we are writing out
* @ctl: free space cache we are going to write out
* @block_group: block_group for this cache if it belongs to a block_group
@@ -1373,7 +1377,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
* on mount. This will return 0 if it was successful in writing the cache out,
* or an errno if it was not.
*/
-static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+static int __btrfs_write_out_cache(struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
@@ -1506,7 +1510,7 @@ out:
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
}
- btrfs_update_inode(trans, root, BTRFS_I(inode));
+ btrfs_update_inode(trans, BTRFS_I(inode));
if (must_iput)
iput(inode);
return ret;
@@ -1532,8 +1536,8 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return 0;
- ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
- block_group, &block_group->io_ctl, trans);
+ ret = __btrfs_write_out_cache(inode, ctl, block_group,
+ &block_group->io_ctl, trans);
if (ret) {
btrfs_debug(fs_info,
"failed to write free space cache for block group %llu error %d",
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index c0e734082dcc..7b598b070700 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_free_space_info);
btrfs_set_free_space_extent_count(leaf, info, 0);
btrfs_set_free_space_flags(leaf, info, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
if (extent_count != expected_extent_count) {
@@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
i += extent_size;
@@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
@@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
extent_count += new_extents;
btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
@@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
return !!extent_buffer_test_bit(leaf, ptr, i);
}
-static void free_space_set_bits(struct btrfs_block_group *block_group,
+static void free_space_set_bits(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{
@@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
*size -= end - *start;
*start = end;
@@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
cur_start = start;
cur_size = size;
while (1) {
- free_space_set_bits(block_group, path, &cur_start, &cur_size,
+ free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
!remove);
if (cur_size == 0)
break;
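
free_space_set_bits() above consumes a range cursor: each call clamps
(*start, *size) to the bitmap item under the path, flips the covered bits, then
advances *start past the item so the caller's loop can step to the next one. The
byte-to-bit mapping is roughly the following (a standalone sketch; found_start
and found_end describe the current bitmap item's window and sectorsize_bits is
the block size shift):

	first = (*start - found_start) >> sectorsize_bits;
	last = (min(end, found_end) - found_start) >> sectorsize_bits;
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);

The loop in modify_free_space_bitmap() terminates once *size reaches zero.
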
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index a523d64d5491..318df6f9d9cb 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -139,6 +139,12 @@ enum {
*/
BTRFS_FS_FEATURE_CHANGED,
+ /*
+ * Indicate that we have found a tree block which is only aligned to
+ * sectorsize, but not to nodesize. This should be rare nowadays.
+ */
+ BTRFS_FS_UNALIGNED_TREE_BLOCK,
+
#if BITS_PER_LONG == 32
/* Indicate if we have error/warn message printed on 32bit systems */
BTRFS_FS_32BIT_ERROR,
@@ -171,19 +177,17 @@ enum {
BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
- BTRFS_MOUNT_CHECK_INTEGRITY = (1UL << 19),
- BTRFS_MOUNT_CHECK_INTEGRITY_DATA = (1UL << 20),
- BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 21),
- BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 22),
- BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 23),
- BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 24),
- BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 25),
- BTRFS_MOUNT_NOLOGREPLAY = (1UL << 26),
- BTRFS_MOUNT_REF_VERIFY = (1UL << 27),
- BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 28),
- BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 29),
- BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 30),
- BTRFS_MOUNT_NODISCARD = (1UL << 31),
+ BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 19),
+ BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 20),
+ BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 21),
+ BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 22),
+ BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 23),
+ BTRFS_MOUNT_NOLOGREPLAY = (1UL << 24),
+ BTRFS_MOUNT_REF_VERIFY = (1UL << 25),
+ BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 26),
+ BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 27),
+ BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 28),
+ BTRFS_MOUNT_NODISCARD = (1UL << 29),
};
/*
@@ -216,7 +220,8 @@ enum {
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
- BTRFS_FEATURE_INCOMPAT_ZONED)
+ BTRFS_FEATURE_INCOMPAT_ZONED | \
+ BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)
#ifdef CONFIG_BTRFS_DEBUG
/*
@@ -225,6 +230,7 @@ enum {
*/
#define BTRFS_FEATURE_INCOMPAT_SUPP \
(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
+ BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
#else
@@ -369,6 +375,7 @@ struct btrfs_fs_info {
struct btrfs_root *uuid_root;
struct btrfs_root *data_reloc_root;
struct btrfs_root *block_group_root;
+ struct btrfs_root *stripe_root;
/* The log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
@@ -409,7 +416,17 @@ struct btrfs_fs_info {
struct btrfs_block_rsv empty_block_rsv;
+ /*
+ * Updated while holding the lock 'trans_lock'. Due to the life cycle of
+	 * a transaction, it can be read directly while holding a transaction
+	 * handle; everywhere else it must be read with btrfs_get_fs_generation().
+ * Should always be updated using btrfs_set_fs_generation().
+ */
u64 generation;
+ /*
+ * Always use btrfs_get_last_trans_committed() and
+ * btrfs_set_last_trans_committed() to read and update this field.
+ */
u64 last_trans_committed;
/*
* Generation of the last transaction used for block group relocation
@@ -645,9 +662,6 @@ struct btrfs_fs_info {
struct btrfs_discard_ctl discard_ctl;
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- u32 check_integrity_print_mask;
-#endif
/* Is qgroup tracking in a consistent state? */
u64 qgroup_flags;
@@ -683,6 +697,7 @@ struct btrfs_fs_info {
/* Protected by qgroup_rescan_lock */
bool qgroup_rescan_running;
u8 qgroup_drop_subtree_thres;
+ u64 qgroup_enable_gen;
/*
* If this is not 0, then it indicates a serious filesystem error has
@@ -812,6 +827,26 @@ struct btrfs_fs_info {
#endif
};
+static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
+{
+ return READ_ONCE(fs_info->generation);
+}
+
+static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
+{
+ WRITE_ONCE(fs_info->generation, gen);
+}
+
+static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
+{
+ return READ_ONCE(fs_info->last_trans_committed);
+}
+
+static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
+{
+ WRITE_ONCE(fs_info->last_trans_committed, gen);
+}
+
static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
u64 gen)
{
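
The two accessor pairs added above are plain READ_ONCE()/WRITE_ONCE() wrappers:
writers still serialize on trans_lock, and the macros only guarantee that the
compiler emits a single access and does not cache, fuse or reorder it; no memory
barriers are implied. The usage split the comments describe, as a sketch:

	u64 gen;

	if (trans)	/* holding a transaction handle */
		gen = fs_info->generation;	/* stable for the handle's lifetime */
	else
		gen = btrfs_get_fs_generation(fs_info);
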
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 4c322b720a80..7d734830e514 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
- btrfs_truncate_item(path, item_size - del_len, 1);
+ btrfs_truncate_item(trans, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
@@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
- btrfs_truncate_item(path, item_size - sub_item_len, 1);
+ btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
@@ -247,7 +247,7 @@ out:
}
/*
- * btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
+ * Insert an extended inode ref into a tree.
*
* The caller must have checked against BTRFS_LINK_MAX already.
*/
@@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
name))
goto out;
- btrfs_extend_item(path, ins_len);
+ btrfs_extend_item(trans, path, ins_len);
ret = 0;
}
if (ret < 0)
@@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)&extref->name;
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
@@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
goto out;
old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
- btrfs_extend_item(path, ins_len);
+ btrfs_extend_item(trans, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
@@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)(ref + 1);
}
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
@@ -591,7 +591,7 @@ search_again:
num_dec = (orig_num_bytes - extent_num_bytes);
if (extent_start != 0)
control->sub_bytes += num_dec;
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
@@ -617,7 +617,7 @@ search_again:
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
- btrfs_truncate_item(path, size, 1);
+ btrfs_truncate_item(trans, path, size, 1);
} else if (!del_item) {
/*
* We have to bail so the last_size is set to
@@ -676,7 +676,8 @@ delete:
bytes_deleted += extent_num_bytes;
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
- extent_start, extent_num_bytes, 0);
+ extent_start, extent_num_bytes, 0,
+ root->root_key.objectid);
btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
control->ino, extent_offset,
root->root_key.objectid, false);
diff --git a/fs/btrfs/inode-item.h b/fs/btrfs/inode-item.h
index ede43b6c6559..4337bb26f419 100644
--- a/fs/btrfs/inode-item.h
+++ b/fs/btrfs/inode-item.h
@@ -4,6 +4,7 @@
#define BTRFS_INODE_ITEM_H
#include <linux/types.h>
+#include <linux/crc32c.h>
struct btrfs_trans_handle;
struct btrfs_root;
@@ -12,6 +13,7 @@ struct btrfs_key;
struct btrfs_inode_extref;
struct btrfs_inode;
struct extent_buffer;
+struct fscrypt_str;
/*
* Return this if we need to call truncate_block for the last bit of the
@@ -76,6 +78,12 @@ static inline void btrfs_inode_split_flags(u64 inode_item_flags,
*ro_flags = (u32)(inode_item_flags >> 32);
}
+/* Compute the key offset of an extended inode ref. */
+static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name, int len)
+{
+ return (u64)crc32c(parent_objectid, name, len);
+}
+
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_truncate_control *control);
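
btrfs_extref_hash() above fixes the key layout for extended inode refs: the item
for a (parent, name) pair lives at offset crc32c(parent_objectid, name). A lookup
sketch using that invariant (variable names are illustrative):

	struct btrfs_key key;

	key.objectid = inode_objectid;		/* inode that owns the ref */
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = btrfs_extref_hash(parent_objectid, name, name_len);

Seeding the CRC with the parent objectid typically spreads the same name under
different parents across different offsets, reducing collisions per key.
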
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 7814b9d654ce..5e3fccddde0c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -71,6 +71,7 @@
#include "super.h"
#include "orphan.h"
#include "backref.h"
+#include "raid-stripe-tree.h"
struct btrfs_iget_args {
u64 ino;
@@ -348,7 +349,7 @@ static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
}
/*
- * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
+ * Lock inode i_rwsem based on arguments passed.
*
* ilock_flags can have the following bit set:
*
@@ -382,7 +383,7 @@ int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
}
/*
- * btrfs_inode_unlock - unock inode i_rwsem
+ * Unlock inode i_rwsem.
*
* ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
* to decide whether the lock acquired is shared or exclusive.
@@ -573,7 +574,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
kunmap_local(kaddr);
put_page(page);
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@@ -670,7 +671,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
}
btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret && ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -1565,8 +1566,11 @@ out_unlock:
* Phase two of compressed writeback. This is the ordered portion of the code,
* which only gets called in the order the work was queued. We walk all the
* async extents created by compress_file_range and send them down to the disk.
+ *
+ * If called with @do_free == true, this performs the final cleanup and frees
+ * the work struct once the last async chunk completes.
*/
-static noinline void submit_compressed_extents(struct btrfs_work *work)
+static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
struct async_chunk *async_chunk = container_of(work, struct async_chunk,
work);
@@ -1575,6 +1579,21 @@ static noinline void submit_compressed_extents(struct btrfs_work *work)
unsigned long nr_pages;
u64 alloc_hint = 0;
+ if (do_free) {
+ struct async_chunk *async_chunk;
+ struct async_cow *async_cow;
+
+ async_chunk = container_of(work, struct async_chunk, work);
+ btrfs_add_delayed_iput(async_chunk->inode);
+ if (async_chunk->blkcg_css)
+ css_put(async_chunk->blkcg_css);
+
+ async_cow = async_chunk->async_cow;
+ if (atomic_dec_and_test(&async_cow->num_chunks))
+ kvfree(async_cow);
+ return;
+ }
+
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
PAGE_SHIFT;
@@ -1591,21 +1610,6 @@ static noinline void submit_compressed_extents(struct btrfs_work *work)
cond_wake_up_nomb(&fs_info->async_submit_wait);
}
-static noinline void async_cow_free(struct btrfs_work *work)
-{
- struct async_chunk *async_chunk;
- struct async_cow *async_cow;
-
- async_chunk = container_of(work, struct async_chunk, work);
- btrfs_add_delayed_iput(async_chunk->inode);
- if (async_chunk->blkcg_css)
- css_put(async_chunk->blkcg_css);
-
- async_cow = async_chunk->async_cow;
- if (atomic_dec_and_test(&async_cow->num_chunks))
- kvfree(async_cow);
-}
-
static bool run_delalloc_compressed(struct btrfs_inode *inode,
struct page *locked_page, u64 start,
u64 end, struct writeback_control *wbc)
@@ -1683,7 +1687,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
}
btrfs_init_work(&async_chunk[i].work, compress_file_range,
- submit_compressed_extents, async_cow_free);
+ submit_compressed_extents);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
@@ -2235,8 +2239,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
if (inode->defrag_bytes &&
- test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
- 0, NULL))
+ test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
return false;
return true;
}
@@ -2847,7 +2850,7 @@ int btrfs_writepage_cow_fixup(struct page *page)
ihold(inode);
btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
get_page(page);
- btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
+ btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
fixup->page = page;
fixup->inode = BTRFS_I(inode);
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
@@ -2912,7 +2915,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(struct btrfs_file_extent_item));
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@@ -3070,7 +3073,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
trans->block_rsv = &inode->block_rsv;
- ret = btrfs_update_inode_fallback(trans, root, inode);
+ ret = btrfs_update_inode_fallback(trans, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3091,6 +3094,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &inode->block_rsv;
+ ret = btrfs_insert_raid_extent(trans, ordered_extent);
+ if (ret)
+ goto out;
+
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
@@ -3136,7 +3143,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
&cached_state);
btrfs_inode_safe_disk_i_size_write(inode, 0);
- ret = btrfs_update_inode_fallback(trans, root, inode);
+ ret = btrfs_update_inode_fallback(trans, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3224,7 +3231,8 @@ out:
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
- !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
+ list_empty(&ordered->bioc_list))
btrfs_finish_ordered_zoned(ordered);
return btrfs_finish_one_ordered(ordered);
}
@@ -3282,7 +3290,7 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
if (btrfs_is_data_reloc_root(inode->root) &&
test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
- 1, NULL)) {
+ NULL)) {
/* Skip the range without csum for data reloc inode */
clear_extent_bits(&inode->io_tree, file_offset, end,
EXTENT_NODATASUM);
@@ -3306,7 +3314,7 @@ zeroit:
}
/*
- * btrfs_add_delayed_iput - perform a delayed iput on @inode
+ * Perform a delayed iput on @inode.
*
* @inode: The inode we want to perform iput on
*
@@ -3754,19 +3762,17 @@ static int btrfs_read_locked_inode(struct inode *inode,
btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
round_up(i_size_read(inode), fs_info->sectorsize));
- inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
- inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
+ inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
+ btrfs_timespec_nsec(leaf, &inode_item->atime));
- inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
- inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
+ inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
+ btrfs_timespec_nsec(leaf, &inode_item->mtime));
inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
btrfs_timespec_nsec(leaf, &inode_item->ctime));
- BTRFS_I(inode)->i_otime.tv_sec =
- btrfs_timespec_sec(leaf, &inode_item->otime);
- BTRFS_I(inode)->i_otime.tv_nsec =
- btrfs_timespec_nsec(leaf, &inode_item->otime);
+ BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
+ BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
@@ -3792,7 +3798,7 @@ cache_index:
* This is required for both inode re-read from disk and delayed inode
* in delayed_nodes_tree.
*/
- if (BTRFS_I(inode)->last_trans == fs_info->generation)
+ if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
@@ -3922,24 +3928,22 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
btrfs_set_token_timespec_sec(&token, &item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode_get_ctime(inode).tv_nsec);
+ inode_get_ctime_nsec(inode));
- btrfs_set_token_timespec_sec(&token, &item->otime,
- BTRFS_I(inode)->i_otime.tv_sec);
- btrfs_set_token_timespec_nsec(&token, &item->otime,
- BTRFS_I(inode)->i_otime.tv_nsec);
+ btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
btrfs_set_token_inode_generation(&token, item,
@@ -3957,8 +3961,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *inode)
+ struct btrfs_inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
@@ -3969,7 +3972,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
+ ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -3981,7 +3984,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
@@ -3992,10 +3995,10 @@ failed:
/*
* copy everything in the in-memory inode into the btree.
*/
-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_inode *inode)
+int btrfs_update_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_inode *inode)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -4011,23 +4014,23 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
&& !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
btrfs_update_root_times(trans, root);
- ret = btrfs_delayed_update_inode(trans, root, inode);
+ ret = btrfs_delayed_update_inode(trans, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
- return btrfs_update_inode_item(trans, root, inode);
+ return btrfs_update_inode_item(trans, inode);
}
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct btrfs_inode *inode)
+ struct btrfs_inode *inode)
{
int ret;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret == -ENOSPC)
- return btrfs_update_inode_item(trans, root, inode);
+ return btrfs_update_inode_item(trans, inode);
return ret;
}
@@ -4132,9 +4135,8 @@ err:
btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
- inode_set_ctime_current(&inode->vfs_inode);
- dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
- ret = btrfs_update_inode(trans, root, dir);
+ inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
+ ret = btrfs_update_inode(trans, dir);
out:
return ret;
}
@@ -4148,7 +4150,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
if (!ret) {
drop_nlink(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, inode->root, inode);
+ ret = btrfs_update_inode(trans, inode);
}
return ret;
}
@@ -4306,8 +4308,8 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
inode_inc_iversion(&dir->vfs_inode);
- dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
- ret = btrfs_update_inode_fallback(trans, root, dir);
+ inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
+ ret = btrfs_update_inode_fallback(trans, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@@ -4641,7 +4643,8 @@ out_notrans:
}
/*
- * btrfs_truncate_block - read, zero a chunk and write a block
+ * Read a block, zero a chunk of it and write it back.
+ *
* @inode - inode that we're zeroing
* @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range relative to the
@@ -4791,9 +4794,9 @@ out:
return ret;
}
-static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
- u64 offset, u64 len)
+static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
+ struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_drop_extents_args drop_args = { 0 };
@@ -4833,7 +4836,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
btrfs_abort_transaction(trans, ret);
} else {
btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
- btrfs_update_inode(trans, root, inode);
+ btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
return ret;
@@ -4889,8 +4892,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
struct extent_map *hole_em;
- err = maybe_insert_hole(root, inode, cur_offset,
- hole_size);
+ err = maybe_insert_hole(inode, cur_offset, hole_size);
if (err)
break;
@@ -4916,7 +4918,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
- hole_em->generation = fs_info->generation;
+ hole_em->generation = btrfs_get_fs_generation(fs_info);
err = btrfs_replace_extent_map_range(inode, hole_em, true);
free_extent_map(hole_em);
@@ -4956,7 +4958,8 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
if (newsize != oldsize) {
inode_inc_iversion(inode);
if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
}
@@ -4984,7 +4987,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
i_size_write(inode, newsize);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
pagecache_isize_extended(inode, oldsize, newsize);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_end_transaction(trans);
} else {
@@ -5582,6 +5585,7 @@ static struct inode *new_simple_dir(struct inode *dir,
struct btrfs_key *key,
struct btrfs_root *root)
{
+ struct timespec64 ts;
struct inode *inode = new_inode(dir->i_sb);
if (!inode)
@@ -5600,9 +5604,13 @@ static struct inode *new_simple_dir(struct inode *dir,
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
- inode->i_mtime = inode_set_ctime_current(inode);
- inode->i_atime = dir->i_atime;
- BTRFS_I(inode)->i_otime = inode->i_mtime;
+
+ ts = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, ts);
+ inode_set_atime_to_ts(inode, inode_get_atime(dir));
+ BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
+ BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
+
inode->i_uid = dir->i_uid;
inode->i_gid = dir->i_gid;
@@ -6000,15 +6008,15 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_update_inode(trans, root, inode);
- if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
+ ret = btrfs_update_inode(trans, inode);
+ if (ret == -ENOSPC || ret == -EDQUOT) {
/* whoops, let's try again with the full transaction */
btrfs_end_transaction(trans);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
if (inode->delayed_node)
@@ -6024,7 +6032,7 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
static int btrfs_update_time(struct inode *inode, int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- bool dirty = flags & ~S_VERSION;
+ bool dirty;
if (btrfs_root_readonly(root))
return -EROFS;
@@ -6160,6 +6168,7 @@ static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_new_inode_args *args)
{
+ struct timespec64 ts;
struct inode *dir = args->dir;
struct inode *inode = args->inode;
const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
@@ -6277,9 +6286,9 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
goto discard;
}
- inode->i_mtime = inode_set_ctime_current(inode);
- inode->i_atime = inode->i_mtime;
- BTRFS_I(inode)->i_otime = inode->i_mtime;
+ ts = simple_inode_init_ts(inode);
+ BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
+ BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
/*
* We're going to fill the inode item now, so at this point the inode
@@ -6310,7 +6319,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
}
}
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
/*
* We don't need the path anymore, plus inheriting properties, adding
* ACLs, security xattrs, orphan item or adding the link, will result in
@@ -6444,10 +6453,10 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
* values (the ones it had when the fsync was done).
*/
if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
- parent_inode->vfs_inode.i_mtime =
- inode_set_ctime_current(&parent_inode->vfs_inode);
+ inode_set_mtime_to_ts(&parent_inode->vfs_inode,
+ inode_set_ctime_current(&parent_inode->vfs_inode));
- ret = btrfs_update_inode(trans, root, parent_inode);
+ ret = btrfs_update_inode(trans, parent_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
return ret;
@@ -6598,7 +6607,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
} else {
struct dentry *parent = dentry->d_parent;
- err = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ err = btrfs_update_inode(trans, BTRFS_I(inode));
if (err)
goto fail;
if (inode->i_nlink == 1) {
@@ -7103,8 +7112,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
range_end = round_up(offset + nocow_args.num_bytes,
root->fs_info->sectorsize) - 1;
- ret = test_range_bit(io_tree, offset, range_end,
- EXTENT_DELALLOC, 0, NULL);
+ ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
if (ret) {
ret = -EAGAIN;
goto out;
@@ -8005,11 +8013,11 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, &cached_state);
- spin_lock_irq(&inode->ordered_tree.lock);
+ spin_lock_irq(&inode->ordered_tree_lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
ordered->truncated_len = min(ordered->truncated_len,
cur - ordered->file_offset);
- spin_unlock_irq(&inode->ordered_tree.lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
/*
* If the ordered extent has finished, we're safe to delete all
@@ -8339,7 +8347,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
if (ret != -ENOSPC && ret != -EAGAIN)
break;
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret)
break;
@@ -8392,7 +8400,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
int ret2;
trans->block_rsv = &fs_info->trans_block_rsv;
- ret2 = btrfs_update_inode(trans, root, inode);
+ ret2 = btrfs_update_inode(trans, inode);
if (ret2 && !ret)
ret = ret2;
@@ -8481,8 +8489,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->delayed_node = NULL;
- ei->i_otime.tv_sec = 0;
- ei->i_otime.tv_nsec = 0;
+ ei->i_otime_sec = 0;
+ ei->i_otime_nsec = 0;
inode = &ei->vfs_inode;
extent_map_tree_init(&ei->extent_tree);
@@ -8491,7 +8499,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
extent_io_tree_init(fs_info, &ei->file_extent_tree,
IO_TREE_INODE_FILE_EXTENT);
mutex_init(&ei->log_mutex);
- btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+ spin_lock_init(&ei->ordered_tree_lock);
+ ei->ordered_tree = RB_ROOT;
+ ei->ordered_tree_last = NULL;
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
RB_CLEAR_NODE(&ei->rb_node);
@@ -8634,8 +8644,8 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
stat->result_mask |= STATX_BTIME;
- stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
- stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
+ stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
+ stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
if (bi_flags & BTRFS_INODE_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (bi_flags & BTRFS_INODE_COMPRESS)
@@ -8823,7 +8833,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
BTRFS_I(old_dentry->d_inode),
old_name, &old_rename_ctx);
if (!ret)
- ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -8838,7 +8848,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
BTRFS_I(new_dentry->d_inode),
new_name, &new_rename_ctx);
if (!ret)
- ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -9083,7 +9093,7 @@ static int btrfs_rename(struct mnt_idmap *idmap,
BTRFS_I(d_inode(old_dentry)),
&old_fname.disk_name, &rename_ctx);
if (!ret)
- ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -9208,7 +9218,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
- btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
+ btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
return work;
}
@@ -9446,7 +9456,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
d_instantiate_new(dentry, inode);
@@ -9639,7 +9649,7 @@ next:
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
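
Two related simplifications run through inode.c above: btrfs_update_inode() and
btrfs_update_inode_fallback() derive the root from inode->root instead of taking
it as a parameter, and btrfs_init_work() drops its third argument because the
free step of ordered work is folded into the work function behind a do_free
flag. The shape of the latter, as a standalone sketch (my_ctx and process() are
hypothetical; the assumption, matching the comment on
submit_compressed_extents(), is that the work queue calls the function one final
time with do_free == true):

	static void my_work_fn(struct btrfs_work *work, bool do_free)
	{
		struct my_ctx *ctx = container_of(work, struct my_ctx, work);

		if (do_free) {		/* teardown invocation, runs last */
			kfree(ctx);
			return;
		}
		process(ctx);		/* normal ordered processing */
	}

This drops one function pointer per work item and keeps setup and teardown of
the context in a single place.
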
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8e7d03bc1b56..752acff2c734 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -385,7 +385,7 @@ update_flags:
btrfs_sync_inode_flags_to_i_flags(inode);
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
out_end_trans:
btrfs_end_transaction(trans);
@@ -652,18 +652,18 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
/* Tree log can't currently deal with an inode which is a new root. */
btrfs_set_log_full_commit(trans);
- ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, 0, objectid, root->root_key.objectid, inherit);
if (ret)
goto out;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
- BTRFS_NESTING_NORMAL);
+ 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
goto out;
}
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
inode_item = &root_item->inode;
btrfs_set_stack_inode_generation(inode_item, 1);
@@ -2635,6 +2635,12 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
return -EINVAL;
}
+ if (fs_info->fs_devices->temp_fsid) {
+ btrfs_err(fs_info,
+ "device add not supported on cloned temp-fsid mount");
+ return -EINVAL;
+ }
+
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
@@ -2676,8 +2682,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args_v2 *vol_args;
- struct block_device *bdev = NULL;
- void *holder;
+ struct bdev_handle *bdev_handle = NULL;
int ret;
bool cancel = false;
@@ -2714,7 +2719,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
goto err_drop;
/* Exclusive operation is now claimed */
- ret = btrfs_rm_device(fs_info, &args, &bdev, &holder);
+ ret = btrfs_rm_device(fs_info, &args, &bdev_handle);
btrfs_exclop_finish(fs_info);
@@ -2728,8 +2733,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
}
err_drop:
mnt_drop_write_file(file);
- if (bdev)
- blkdev_put(bdev, holder);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
@@ -2742,8 +2747,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ioctl_vol_args *vol_args;
- struct block_device *bdev = NULL;
- void *holder;
+ struct bdev_handle *bdev_handle = NULL;
int ret;
bool cancel = false;
@@ -2770,15 +2774,15 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE,
cancel);
if (ret == 0) {
- ret = btrfs_rm_device(fs_info, &args, &bdev, &holder);
+ ret = btrfs_rm_device(fs_info, &args, &bdev_handle);
if (!ret)
btrfs_info(fs_info, "disk deleted %s", vol_args->name);
btrfs_exclop_finish(fs_info);
}
mnt_drop_write_file(file);
- if (bdev)
- blkdev_put(bdev, holder);
+ if (bdev_handle)
+ bdev_release(bdev_handle);
out:
btrfs_put_dev_args_from_path(&args);
kfree(vol_args);
@@ -2822,7 +2826,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
}
if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
- fi_args->generation = fs_info->generation;
+ fi_args->generation = btrfs_get_fs_generation(fs_info);
fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
}
@@ -2947,7 +2951,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
@@ -3131,7 +3135,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
return PTR_ERR(trans);
/* No running transaction, don't bother */
- transid = root->fs_info->last_trans_committed;
+ transid = btrfs_get_last_trans_committed(root->fs_info);
goto out;
}
transid = trans->transid;
@@ -3697,7 +3701,8 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
- ret = btrfs_quota_enable(fs_info);
+ case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
+ ret = btrfs_quota_enable(fs_info, sa);
break;
case BTRFS_QUOTA_CTL_DISABLE:
ret = btrfs_quota_disable(fs_info);
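
The device-removal ioctls above migrate from the (bdev, holder) pair to
struct bdev_handle, which carries the holder internally so teardown is a single
call. The open/close pairing assumed by this API (a sketch; path and holder are
illustrative):

	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
				   holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* ... operate on handle->bdev ... */
	bdev_release(handle);

Callers can no longer pass a mismatched holder on the release side, which is the
class of bug the old blkdev_put(bdev, holder) form allowed.
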
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 7979449a58d6..74d8e2003f58 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
+#include <trace/events/btrfs.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
@@ -73,6 +74,7 @@ static struct btrfs_lockdep_keyset {
{ .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
{ .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
+ { .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") },
{ .id = 0, DEFINE_NAME("tree") },
};
@@ -102,6 +104,15 @@ void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buff
#endif
+#ifdef CONFIG_BTRFS_DEBUG
+static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner)
+{
+ eb->lock_owner = owner;
+}
+#else
+static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
+#endif
+
/*
* Extent buffer locking
* =====================
@@ -164,7 +175,7 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
if (down_write_trylock(&eb->lock)) {
- eb->lock_owner = current->pid;
+ btrfs_set_eb_lock_owner(eb, current->pid);
trace_btrfs_try_tree_write_lock(eb);
return 1;
}
@@ -181,7 +192,8 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
}
/*
- * __btrfs_tree_lock - lock eb for write
+ * Lock eb for write.
+ *
* @eb: the eb to lock
* @nest: the nesting to use for the lock
*
@@ -196,7 +208,7 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
start_ns = ktime_get_ns();
down_write_nested(&eb->lock, nest);
- eb->lock_owner = current->pid;
+ btrfs_set_eb_lock_owner(eb, current->pid);
trace_btrfs_tree_lock(eb, start_ns);
}
@@ -211,7 +223,7 @@ void btrfs_tree_lock(struct extent_buffer *eb)
void btrfs_tree_unlock(struct extent_buffer *eb)
{
trace_btrfs_tree_unlock(eb);
- eb->lock_owner = 0;
+ btrfs_set_eb_lock_owner(eb, 0);
up_write(&eb->lock);
}
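
btrfs_set_eb_lock_owner() above pairs with the lock_owner field added alongside
leak_list at the start of this section: both exist only under
CONFIG_BTRFS_DEBUG, and the !DEBUG stub compiles to nothing, so the callers in
btrfs_tree_lock()/btrfs_tree_unlock() stay unconditional. The generic idiom, as
a sketch with hypothetical names:

	#ifdef CONFIG_MY_DEBUG
	static void set_owner(struct my_obj *o, pid_t pid)
	{
		o->owner = pid;		/* field only exists in debug builds */
	}
	#else
	static void set_owner(struct my_obj *o, pid_t pid) { }
	#endif

An empty static function is optimized out entirely, so release builds pay
neither the store nor the struct field.
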
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
index 7695decc7243..b8f9c9e56c8c 100644
--- a/fs/btrfs/messages.c
+++ b/fs/btrfs/messages.c
@@ -72,11 +72,11 @@ static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
* over the error. Each subsequent error that doesn't have any context
* of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
*/
-const char * __attribute_const__ btrfs_decode_error(int errno)
+const char * __attribute_const__ btrfs_decode_error(int error)
{
char *errstr = "unknown";
- switch (errno) {
+ switch (error) {
case -ENOENT: /* -2 */
errstr = "No such entry";
break;
@@ -110,12 +110,12 @@ const char * __attribute_const__ btrfs_decode_error(int errno)
}
/*
- * __btrfs_handle_fs_error decodes expected errors from the caller and
- * invokes the appropriate error response.
+ * Decodes expected errors from the caller and invokes the appropriate error
+ * response.
*/
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...)
+ unsigned int line, int error, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
#ifdef CONFIG_PRINTK
@@ -132,11 +132,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
* Special case: if the error is EROFS, and we're already under
* SB_RDONLY, then it is safe here.
*/
- if (errno == -EROFS && sb_rdonly(sb))
+ if (error == -EROFS && sb_rdonly(sb))
return;
#ifdef CONFIG_PRINTK
- errstr = btrfs_decode_error(errno);
+ errstr = btrfs_decode_error(error);
btrfs_state_to_string(fs_info, statestr);
if (fmt) {
struct va_format vaf;
@@ -147,11 +147,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
vaf.va = &args;
pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s (%pV)\n",
- sb->s_id, statestr, function, line, errno, errstr, &vaf);
+ sb->s_id, statestr, function, line, error, errstr, &vaf);
va_end(args);
} else {
pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s\n",
- sb->s_id, statestr, function, line, errno, errstr);
+ sb->s_id, statestr, function, line, error, errstr);
}
#endif
@@ -159,7 +159,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
* Today we only save the error info to memory. Long term we'll also
* send it down to the disk.
*/
- WRITE_ONCE(fs_info->fs_error, errno);
+ WRITE_ONCE(fs_info->fs_error, error);
/* Don't go through full error handling during mount. */
if (!(sb->s_flags & SB_BORN))
@@ -283,12 +283,12 @@ void __cold btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info)
#endif
/*
- * __btrfs_panic decodes unexpected, fatal errors from the caller, issues an
- * alert, and either panics or BUGs, depending on mount options.
+ * Decode unexpected, fatal errors from the caller, issue an alert, and either
+ * panic or BUG, depending on mount options.
*/
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...)
+ unsigned int line, int error, const char *fmt, ...)
{
char *s_id = "<unknown>";
const char *errstr;
@@ -301,13 +301,13 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
va_start(args, fmt);
vaf.va = &args;
- errstr = btrfs_decode_error(errno);
+ errstr = btrfs_decode_error(error);
if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
- s_id, function, line, &vaf, errno, errstr);
+ s_id, function, line, &vaf, error, errstr);
btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
- function, line, &vaf, errno, errstr);
+ function, line, &vaf, error, errstr);
va_end(args);
/* Caller calls BUG() */
}
diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
index 1ae6f8e23e07..4d04c1fa5899 100644
--- a/fs/btrfs/messages.h
+++ b/fs/btrfs/messages.h
@@ -184,25 +184,25 @@ do { \
__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...);
+ unsigned int line, int error, const char *fmt, ...);
-const char * __attribute_const__ btrfs_decode_error(int errno);
+const char * __attribute_const__ btrfs_decode_error(int error);
-#define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \
+#define btrfs_handle_fs_error(fs_info, error, fmt, args...) \
__btrfs_handle_fs_error((fs_info), __func__, __LINE__, \
- (errno), fmt, ##args)
+ (error), fmt, ##args)
__printf(5, 6)
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
- unsigned int line, int errno, const char *fmt, ...);
+ unsigned int line, int error, const char *fmt, ...);
/*
* If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
* will panic(). Otherwise we BUG() here.
*/
-#define btrfs_panic(fs_info, errno, fmt, args...) \
+#define btrfs_panic(fs_info, error, fmt, args...) \
do { \
- __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
+ __btrfs_panic(fs_info, __func__, __LINE__, error, fmt, ##args); \
BUG(); \
} while (0)
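The errno -> error rename throughout messages.c and messages.h most likely
avoids the name clash with the C library's errno, which on common libcs is a
macro; the kernel compiles either way, but the old name is a hazard wherever
this code is reused in userspace (assumption: e.g. when synced into tools such
as btrfs-progs). A userspace illustration of the pitfall:

    /* With <errno.h> included, "errno" may expand to something like
     * (*__errno_location()), so using it as a parameter name can fail to
     * compile. A neutral name is always safe.
     */
    #include <errno.h>

    static const char *decode(int error)    /* not: int errno */
    {
            return error == -ENOENT ? "No such entry" : "unknown";
    }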
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 345c449d588c..574e8a55e24a 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -124,25 +124,24 @@ static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
 * look for the first ordered struct that has this offset, otherwise
* the first one less than this offset
*/
-static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
- u64 file_offset)
+static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
+ u64 file_offset)
{
- struct rb_root *root = &tree->tree;
struct rb_node *prev = NULL;
struct rb_node *ret;
struct btrfs_ordered_extent *entry;
- if (tree->last) {
- entry = rb_entry(tree->last, struct btrfs_ordered_extent,
+ if (inode->ordered_tree_last) {
+ entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
rb_node);
if (in_range(file_offset, entry->file_offset, entry->num_bytes))
- return tree->last;
+ return inode->ordered_tree_last;
}
- ret = __tree_search(root, file_offset, &prev);
+ ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
if (!ret)
ret = prev;
if (ret)
- tree->last = ret;
+ inode->ordered_tree_last = ret;
return ret;
}
@@ -191,6 +190,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
INIT_LIST_HEAD(&entry->log_list);
INIT_LIST_HEAD(&entry->root_extent_list);
INIT_LIST_HEAD(&entry->work_list);
+ INIT_LIST_HEAD(&entry->bioc_list);
init_completion(&entry->completion);
/*
@@ -208,7 +208,6 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
struct btrfs_inode *inode = BTRFS_I(entry->inode);
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
@@ -221,13 +220,14 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
/* One ref for the tree. */
refcount_inc(&entry->refs);
- spin_lock_irq(&tree->lock);
- node = tree_insert(&tree->tree, entry->file_offset, &entry->rb_node);
+ spin_lock_irq(&inode->ordered_tree_lock);
+ node = tree_insert(&inode->ordered_tree, entry->file_offset,
+ &entry->rb_node);
if (node)
btrfs_panic(fs_info, -EEXIST,
"inconsistency in ordered tree at offset %llu",
entry->file_offset);
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
spin_lock(&root->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
@@ -287,12 +287,11 @@ struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum)
{
- struct btrfs_ordered_inode_tree *tree;
+ struct btrfs_inode *inode = BTRFS_I(entry->inode);
- tree = &BTRFS_I(entry->inode)->ordered_tree;
- spin_lock_irq(&tree->lock);
+ spin_lock_irq(&inode->ordered_tree_lock);
list_add_tail(&sum->list, &entry->list);
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
}
static void finish_ordered_fn(struct btrfs_work *work)
@@ -310,7 +309,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
struct btrfs_inode *inode = BTRFS_I(ordered->inode);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- lockdep_assert_held(&inode->ordered_tree.lock);
+ lockdep_assert_held(&inode->ordered_tree_lock);
if (page) {
ASSERT(page->mapping);
@@ -364,7 +363,7 @@ static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
fs_info->endio_freespace_worker : fs_info->endio_write_workers;
- btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
+ btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
btrfs_queue_work(wq, &ordered->work);
}
@@ -378,9 +377,9 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
- spin_lock_irqsave(&inode->ordered_tree.lock, flags);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
- spin_unlock_irqrestore(&inode->ordered_tree.lock, flags);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
if (ret)
btrfs_queue_ordered_fn(ordered);
@@ -404,7 +403,6 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
struct page *page, u64 file_offset,
u64 num_bytes, bool uptodate)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
@@ -414,13 +412,13 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
file_offset + num_bytes - 1,
uptodate);
- spin_lock_irqsave(&tree->lock, flags);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
while (cur < file_offset + num_bytes) {
u64 entry_end;
u64 end;
u32 len;
- node = tree_search(tree, cur);
+ node = ordered_tree_search(inode, cur);
/* No ordered extents at all */
if (!node)
break;
@@ -467,13 +465,13 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
len = end + 1 - cur;
if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
btrfs_queue_ordered_fn(entry);
- spin_lock_irqsave(&tree->lock, flags);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
}
cur += len;
}
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}
/*
@@ -497,19 +495,18 @@ bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
bool finished = false;
- spin_lock_irqsave(&tree->lock, flags);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
if (cached && *cached) {
entry = *cached;
goto have_entry;
}
- node = tree_search(tree, file_offset);
+ node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@@ -540,7 +537,7 @@ out:
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
}
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
return finished;
}
@@ -578,7 +575,6 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry)
{
- struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = btrfs_inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
@@ -609,16 +605,15 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
fs_info->delalloc_batch);
- tree = &btrfs_inode->ordered_tree;
- spin_lock_irq(&tree->lock);
+ spin_lock_irq(&btrfs_inode->ordered_tree_lock);
node = &entry->rb_node;
- rb_erase(node, &tree->tree);
+ rb_erase(node, &btrfs_inode->ordered_tree);
RB_CLEAR_NODE(node);
- if (tree->last == node)
- tree->last = NULL;
+ if (btrfs_inode->ordered_tree_last == node)
+ btrfs_inode->ordered_tree_last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&btrfs_inode->ordered_tree_lock);
/*
* The current running transaction is waiting on us, we need to let it
@@ -711,7 +706,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
spin_unlock(&root->ordered_extent_lock);
btrfs_init_work(&ordered->flush_work,
- btrfs_run_ordered_extent_work, NULL, NULL);
+ btrfs_run_ordered_extent_work, NULL);
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
@@ -875,14 +870,12 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
u64 file_offset)
{
- struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
- tree = &inode->ordered_tree;
- spin_lock_irqsave(&tree->lock, flags);
- node = tree_search(tree, file_offset);
+ spin_lock_irqsave(&inode->ordered_tree_lock, flags);
+ node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@@ -894,7 +887,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
trace_btrfs_ordered_extent_lookup(inode, entry);
}
out:
- spin_unlock_irqrestore(&tree->lock, flags);
+ spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
return entry;
}
@@ -904,15 +897,13 @@ out:
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct btrfs_inode *inode, u64 file_offset, u64 len)
{
- struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
- node = tree_search(tree, file_offset);
+ spin_lock_irq(&inode->ordered_tree_lock);
+ node = ordered_tree_search(inode, file_offset);
if (!node) {
- node = tree_search(tree, file_offset + len);
+ node = ordered_tree_search(inode, file_offset + len);
if (!node)
goto out;
}
@@ -936,7 +927,7 @@ out:
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_range(inode, entry);
}
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
@@ -947,13 +938,12 @@ out:
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *n;
ASSERT(inode_is_locked(&inode->vfs_inode));
- spin_lock_irq(&tree->lock);
- for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+ spin_lock_irq(&inode->ordered_tree_lock);
+ for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
struct btrfs_ordered_extent *ordered;
ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
@@ -966,7 +956,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
refcount_inc(&ordered->refs);
trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
}
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
}
/*
@@ -976,13 +966,11 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
- struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
- tree = &inode->ordered_tree;
- spin_lock_irq(&tree->lock);
- node = tree_search(tree, file_offset);
+ spin_lock_irq(&inode->ordered_tree_lock);
+ node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@@ -990,7 +978,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
@@ -1006,15 +994,14 @@ out:
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct btrfs_inode *inode, u64 file_offset, u64 len)
{
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct rb_node *cur;
struct rb_node *prev;
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
- spin_lock_irq(&tree->lock);
- node = tree->tree.rb_node;
+ spin_lock_irq(&inode->ordered_tree_lock);
+ node = inode->ordered_tree.rb_node;
/*
 * Here we don't want to use ordered_tree_search() which would use
 * inode->ordered_tree_last and screw up the search order.
@@ -1068,7 +1055,7 @@ out:
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
}
- spin_unlock_irq(&tree->lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
@@ -1147,7 +1134,6 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
struct btrfs_ordered_extent *ordered, u64 len)
{
struct btrfs_inode *inode = BTRFS_I(ordered->inode);
- struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
u64 file_offset = ordered->file_offset;
@@ -1187,13 +1173,13 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
refcount_inc(&new->refs);
spin_lock_irq(&root->ordered_extent_lock);
- spin_lock(&tree->lock);
+ spin_lock(&inode->ordered_tree_lock);
/* Remove from tree once */
node = &ordered->rb_node;
- rb_erase(node, &tree->tree);
+ rb_erase(node, &inode->ordered_tree);
RB_CLEAR_NODE(node);
- if (tree->last == node)
- tree->last = NULL;
+ if (inode->ordered_tree_last == node)
+ inode->ordered_tree_last = NULL;
ordered->file_offset += len;
ordered->disk_bytenr += len;
@@ -1224,18 +1210,19 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
}
/* Re-insert the node */
- node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
+ node = tree_insert(&inode->ordered_tree, ordered->file_offset,
+ &ordered->rb_node);
if (node)
btrfs_panic(fs_info, -EEXIST,
"zoned: inconsistency in ordered tree at offset %llu",
ordered->file_offset);
- node = tree_insert(&tree->tree, new->file_offset, &new->rb_node);
+ node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
if (node)
btrfs_panic(fs_info, -EEXIST,
"zoned: inconsistency in ordered tree at offset %llu",
new->file_offset);
- spin_unlock(&tree->lock);
+ spin_unlock(&inode->ordered_tree_lock);
list_add_tail(&new->root_extent_list, &root->ordered_extents);
root->nr_ordered_extents++;
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 173bd5c5df26..567a6d3d4712 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -6,13 +6,6 @@
#ifndef BTRFS_ORDERED_DATA_H
#define BTRFS_ORDERED_DATA_H
-/* one of these per inode */
-struct btrfs_ordered_inode_tree {
- spinlock_t lock;
- struct rb_root tree;
- struct rb_node *last;
-};
-
struct btrfs_ordered_sum {
/*
* Logical start address and length for of the blocks covered by
@@ -151,15 +144,9 @@ struct btrfs_ordered_extent {
struct completion completion;
struct btrfs_work flush_work;
struct list_head work_list;
-};
-static inline void
-btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
-{
- spin_lock_init(&t->lock);
- t->tree = RB_ROOT;
- t->last = NULL;
-}
+ struct list_head bioc_list;
+};
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent);
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
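With struct btrfs_ordered_inode_tree removed, its three members live directly
in struct btrfs_inode, which is why the call sites above now use
inode->ordered_tree_lock and friends. The btrfs_inode change itself is outside
this section; as an assumption, the embedded fields would look roughly like:

    /* Sketch (assumption): fields replacing the removed wrapper struct,
     * embedded directly in struct btrfs_inode and initialized during
     * inode setup instead of via btrfs_ordered_inode_tree_init().
     */
    spinlock_t ordered_tree_lock;
    struct rb_root ordered_tree;            /* ordered extents by file offset */
    struct rb_node *ordered_tree_last;      /* cached result of the last lookup */

The net effect is one less level of indirection in every accessor and one less
helper struct to keep in sync.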
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 0c93439e929f..7e46aa8a0444 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -9,6 +9,8 @@
#include "print-tree.h"
#include "accessors.h"
#include "tree-checker.h"
+#include "volumes.h"
+#include "raid-stripe-tree.h"
struct root_name_map {
u64 id;
@@ -28,6 +30,7 @@ static const struct root_name_map root_map[] = {
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" },
{ BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
+ { BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" },
};
const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
@@ -80,12 +83,20 @@ static void print_extent_data_ref(const struct extent_buffer *eb,
btrfs_extent_data_ref_count(eb, ref));
}
+static void print_extent_owner_ref(const struct extent_buffer *eb,
+ const struct btrfs_extent_owner_ref *ref)
+{
+ ASSERT(btrfs_fs_incompat(eb->fs_info, SIMPLE_QUOTA));
+ pr_cont("extent data owner root %llu\n", btrfs_extent_owner_ref_root_id(eb, ref));
+}
+
static void print_extent_item(const struct extent_buffer *eb, int slot, int type)
{
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
+ struct btrfs_extent_owner_ref *oref;
struct btrfs_disk_key key;
unsigned long end;
unsigned long ptr;
@@ -161,6 +172,10 @@ static void print_extent_item(const struct extent_buffer *eb, int slot, int type
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
break;
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
+ print_extent_owner_ref(eb, oref);
+ break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
eb->start, type);
@@ -189,6 +204,22 @@ static void print_uuid_item(const struct extent_buffer *l, unsigned long offset,
}
}
+static void print_raid_stripe_key(const struct extent_buffer *eb, u32 item_size,
+ struct btrfs_stripe_extent *stripe)
+{
+ const int num_stripes = btrfs_num_raid_stripes(item_size);
+ const u8 encoding = btrfs_stripe_extent_encoding(eb, stripe);
+
+ pr_info("\t\t\tencoding: %s\n",
+ (encoding && encoding < BTRFS_NR_RAID_TYPES) ?
+ btrfs_raid_array[encoding].raid_name : "unknown");
+
+ for (int i = 0; i < num_stripes; i++)
+ pr_info("\t\t\tstride %d devid %llu physical %llu\n",
+ i, btrfs_raid_stride_devid(eb, &stripe->strides[i]),
+ btrfs_raid_stride_physical(eb, &stripe->strides[i]));
+}
+
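The printer above walks a flexible array of strides. As a sketch of the
assumed on-disk layout behind the accessors (not authoritative, inferred from
btrfs_num_raid_stripes() and the stride getters):

    struct btrfs_raid_stride {
            __le64 devid;           /* device this stride lives on */
            __le64 physical;        /* physical start on that device */
    };

    struct btrfs_stripe_extent {
            __u8 encoding;          /* RAID level, index into btrfs_raid_array */
            __u8 reserved[7];
            struct btrfs_raid_stride strides[];
    };

so the stripe count is essentially (item_size - header size) divided by
sizeof(struct btrfs_raid_stride).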
/*
* Helper to output refs and locking status of extent buffer. Useful to debug
* race condition related problems.
@@ -349,6 +380,10 @@ void btrfs_print_leaf(const struct extent_buffer *l)
print_uuid_item(l, btrfs_item_ptr_offset(l, i),
btrfs_item_size(l, i));
break;
+ case BTRFS_RAID_STRIPE_KEY:
+ print_raid_stripe_key(l, btrfs_item_size(l, i),
+ btrfs_item_ptr(l, i, struct btrfs_stripe_extent));
+ break;
}
}
}
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 0755af0e53e3..f9bf591a0718 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -15,6 +15,7 @@
#include "fs.h"
#include "accessors.h"
#include "super.h"
+#include "dir-item.h"
#define BTRFS_PROP_HANDLERS_HT_BITS 8
static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index b99230db3c82..edb84cc03237 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -30,6 +30,25 @@
#include "root-tree.h"
#include "tree-checker.h"
+enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info)
+{
+ if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ return BTRFS_QGROUP_MODE_DISABLED;
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
+ return BTRFS_QGROUP_MODE_SIMPLE;
+ return BTRFS_QGROUP_MODE_FULL;
+}
+
+bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
+}
+
+bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
+{
+ return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
+}
+
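These predicates replace open-coded flag tests throughout the file:
btrfs_qgroup_enabled() answers "is any quota mode active", while
btrfs_qgroup_full_accounting() gates the expensive paths (backref walks,
refcnt accounting) that simple quotas skip. The typical call-site conversion,
as seen in the hunks below:

    /* Before: a binary enabled/disabled test. */
    if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
            return 0;

    /* After: bail out in both the disabled and the simple mode. */
    if (!btrfs_qgroup_full_accounting(fs_info))
            return 0;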
/*
* Helpers to access qgroup reservation
*
@@ -146,16 +165,6 @@ struct btrfs_qgroup_list {
struct btrfs_qgroup *member;
};
-static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
-{
- return (u64)(uintptr_t)qg;
-}
-
-static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
-{
- return (struct btrfs_qgroup *)(uintptr_t)n->aux;
-}
-
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
int init_flags);
@@ -180,34 +189,46 @@ static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
return NULL;
}
-/* must be called with qgroup_lock held */
+/*
+ * Add qgroup to the filesystem's qgroup tree.
+ *
+ * Must be called with qgroup_lock held and @prealloc preallocated.
+ *
+ * Ownership of @prealloc is transferred to this function, so the caller
+ * should no longer touch it.
+ */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *prealloc,
u64 qgroupid)
{
struct rb_node **p = &fs_info->qgroup_tree.rb_node;
struct rb_node *parent = NULL;
struct btrfs_qgroup *qgroup;
+ /* Caller must have pre-allocated @prealloc. */
+ ASSERT(prealloc);
+
while (*p) {
parent = *p;
qgroup = rb_entry(parent, struct btrfs_qgroup, node);
- if (qgroup->qgroupid < qgroupid)
+ if (qgroup->qgroupid < qgroupid) {
p = &(*p)->rb_left;
- else if (qgroup->qgroupid > qgroupid)
+ } else if (qgroup->qgroupid > qgroupid) {
p = &(*p)->rb_right;
- else
+ } else {
+ kfree(prealloc);
return qgroup;
+ }
}
- qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
- if (!qgroup)
- return ERR_PTR(-ENOMEM);
-
+ qgroup = prealloc;
qgroup->qgroupid = qgroupid;
INIT_LIST_HEAD(&qgroup->groups);
INIT_LIST_HEAD(&qgroup->members);
INIT_LIST_HEAD(&qgroup->dirty);
+ INIT_LIST_HEAD(&qgroup->iterator);
+ INIT_LIST_HEAD(&qgroup->nested_iterator);
rb_link_node(&qgroup->node, parent, p);
rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
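Passing @prealloc in lets callers allocate with GFP_KERNEL or GFP_NOFS outside
the spinlock, instead of the GFP_ATOMIC allocation this function used to do
under qgroup_lock. The shape of the pattern at a call site, as a sketch:

    struct btrfs_qgroup *prealloc;

    /* Allocate while sleeping is still allowed. */
    prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
    if (!prealloc)
            return -ENOMEM;

    spin_lock(&fs_info->qgroup_lock);
    qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);  /* consumes prealloc */
    prealloc = NULL;        /* inserted, or freed internally on duplicate */
    spin_unlock(&fs_info->qgroup_lock);

Since the duplicate-key branch frees @prealloc internally, the callers below
NULL their pointer right after the call and only kfree() it on the error paths
where add_qgroup_rb() was never reached.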
@@ -254,27 +275,26 @@ static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
/*
* Add relation specified by two qgroups.
*
- * Must be called with qgroup_lock held.
+ * Must be called with qgroup_lock held; ownership of @prealloc is
+ * transferred to this function and the caller must not touch it anymore.
*
* Return: 0 on success
* -ENOENT if one of the qgroups is NULL
* <0 other errors
*/
-static int __add_relation_rb(struct btrfs_qgroup *member, struct btrfs_qgroup *parent)
+static int __add_relation_rb(struct btrfs_qgroup_list *prealloc,
+ struct btrfs_qgroup *member,
+ struct btrfs_qgroup *parent)
{
- struct btrfs_qgroup_list *list;
-
- if (!member || !parent)
+ if (!member || !parent) {
+ kfree(prealloc);
return -ENOENT;
+ }
- list = kzalloc(sizeof(*list), GFP_ATOMIC);
- if (!list)
- return -ENOMEM;
-
- list->group = parent;
- list->member = member;
- list_add_tail(&list->next_group, &member->groups);
- list_add_tail(&list->next_member, &parent->members);
+ prealloc->group = parent;
+ prealloc->member = member;
+ list_add_tail(&prealloc->next_group, &member->groups);
+ list_add_tail(&prealloc->next_member, &parent->members);
return 0;
}
@@ -288,7 +308,9 @@ static int __add_relation_rb(struct btrfs_qgroup *member, struct btrfs_qgroup *p
* -ENOENT if one of the ids does not exist
* <0 other errors
*/
-static int add_relation_rb(struct btrfs_fs_info *fs_info, u64 memberid, u64 parentid)
+static int add_relation_rb(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_list *prealloc,
+ u64 memberid, u64 parentid)
{
struct btrfs_qgroup *member;
struct btrfs_qgroup *parent;
@@ -296,7 +318,7 @@ static int add_relation_rb(struct btrfs_fs_info *fs_info, u64 memberid, u64 pare
member = find_qgroup_rb(fs_info, memberid);
parent = find_qgroup_rb(fs_info, parentid);
- return __add_relation_rb(member, parent);
+ return __add_relation_rb(prealloc, member, parent);
}
/* Must be called with qgroup_lock held */
@@ -340,11 +362,22 @@ int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
static void qgroup_mark_inconsistent(struct btrfs_fs_info *fs_info)
{
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ return;
fs_info->qgroup_flags |= (BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT |
BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING);
}
+static void qgroup_read_enable_gen(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf, int slot,
+ struct btrfs_qgroup_status_item *ptr)
+{
+ ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ ASSERT(btrfs_item_size(leaf, slot) >= sizeof(*ptr));
+ fs_info->qgroup_enable_gen = btrfs_qgroup_status_enable_gen(leaf, ptr);
+}
+
/*
* The full config is read in one go, only called from open_ctree()
* It doesn't use any locking, as at this point we're still single-threaded
@@ -361,7 +394,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
u64 flags = 0;
u64 rescan_progress = 0;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!fs_info->quota_root)
return 0;
fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
@@ -411,14 +444,14 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
"old qgroup version, quota disabled");
goto out;
}
- if (btrfs_qgroup_status_generation(l, ptr) !=
- fs_info->generation) {
+ fs_info->qgroup_flags = btrfs_qgroup_status_flags(l, ptr);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE) {
+ qgroup_read_enable_gen(fs_info, l, slot, ptr);
+ } else if (btrfs_qgroup_status_generation(l, ptr) != fs_info->generation) {
qgroup_mark_inconsistent(fs_info);
btrfs_err(fs_info,
"qgroup generation mismatch, marked as inconsistent");
}
- fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
- ptr);
rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
goto next1;
}
@@ -434,11 +467,14 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
qgroup_mark_inconsistent(fs_info);
}
if (!qgroup) {
- qgroup = add_qgroup_rb(fs_info, found_key.offset);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
+ struct btrfs_qgroup *prealloc;
+
+ prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
+ if (!prealloc) {
+ ret = -ENOMEM;
goto out;
}
+ qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
}
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
if (ret < 0)
@@ -489,6 +525,8 @@ next1:
if (ret)
goto out;
while (1) {
+ struct btrfs_qgroup_list *list = NULL;
+
slot = path->slots[0];
l = path->nodes[0];
btrfs_item_key_to_cpu(l, &found_key, slot);
@@ -502,8 +540,14 @@ next1:
goto next2;
}
- ret = add_relation_rb(fs_info, found_key.objectid,
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
+ if (!list) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = add_relation_rb(fs_info, list, found_key.objectid,
found_key.offset);
+ list = NULL;
if (ret == -ENOENT) {
btrfs_warn(fs_info,
"orphan qgroup relation 0x%llx->0x%llx",
@@ -522,13 +566,12 @@ next2:
out:
btrfs_free_path(path);
fs_info->qgroup_flags |= flags;
- if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
- ret >= 0)
- ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
-
- if (ret < 0) {
+ if (ret >= 0) {
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+ ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
+ } else {
ulist_free(fs_info->qgroup_ulist);
fs_info->qgroup_ulist = NULL;
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
@@ -550,7 +593,7 @@ bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
struct rb_node *node;
bool ret = false;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
return ret;
/*
* Since we're unmounting, there is no race and no need to grab qgroup
@@ -622,7 +665,7 @@ static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_free_path(path);
return ret;
@@ -700,7 +743,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
@@ -719,7 +762,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@@ -808,7 +851,7 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);
- btrfs_mark_buffer_dirty(l);
+ btrfs_mark_buffer_dirty(trans, l);
out:
btrfs_free_path(path);
@@ -854,7 +897,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
- btrfs_mark_buffer_dirty(l);
+ btrfs_mark_buffer_dirty(trans, l);
out:
btrfs_free_path(path);
@@ -896,7 +939,7 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
btrfs_set_qgroup_status_rescan(l, ptr,
fs_info->qgroup_rescan_progress.objectid);
- btrfs_mark_buffer_dirty(l);
+ btrfs_mark_buffer_dirty(trans, l);
out:
btrfs_free_path(path);
@@ -949,7 +992,8 @@ out:
return ret;
}
-int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_quota_ctl_args *quota_ctl_args)
{
struct btrfs_root *quota_root;
struct btrfs_root *tree_root = fs_info->tree_root;
@@ -959,8 +1003,10 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_qgroup *qgroup = NULL;
+ struct btrfs_qgroup *prealloc = NULL;
struct btrfs_trans_handle *trans = NULL;
struct ulist *ulist = NULL;
+ const bool simple = (quota_ctl_args->cmd == BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA);
int ret = 0;
int slot;
@@ -1063,13 +1109,18 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
struct btrfs_qgroup_status_item);
btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
- fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON;
+ if (simple) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
+ btrfs_set_qgroup_status_enable_gen(leaf, ptr, trans->transid);
+ } else {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ }
btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAGS_MASK);
btrfs_set_qgroup_status_rescan(leaf, ptr, 0);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
key.objectid = 0;
key.type = BTRFS_ROOT_REF_KEY;
@@ -1094,6 +1145,15 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
/* Release locks on tree_root before we access quota_root */
btrfs_release_path(path);
+ /* We should not have a stray @prealloc pointer. */
+ ASSERT(prealloc == NULL);
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ btrfs_abort_transaction(trans, ret);
+ goto out_free_path;
+ }
+
ret = add_qgroup_item(trans, quota_root,
found_key.offset);
if (ret) {
@@ -1101,7 +1161,8 @@ int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
goto out_free_path;
}
- qgroup = add_qgroup_rb(fs_info, found_key.offset);
+ qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
+ prealloc = NULL;
if (IS_ERR(qgroup)) {
ret = PTR_ERR(qgroup);
btrfs_abort_transaction(trans, ret);
@@ -1144,18 +1205,22 @@ out_add_root:
goto out_free_path;
}
- qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- btrfs_abort_transaction(trans, ret);
+ ASSERT(prealloc == NULL);
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
goto out_free_path;
}
+ qgroup = add_qgroup_rb(fs_info, prealloc, BTRFS_FS_TREE_OBJECTID);
+ prealloc = NULL;
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
+ fs_info->qgroup_enable_gen = trans->transid;
+
mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
* Commit the transaction while not holding qgroup_ioctl_lock, to avoid
@@ -1180,8 +1245,14 @@ out_add_root:
spin_lock(&fs_info->qgroup_lock);
fs_info->quota_root = quota_root;
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ if (simple)
+ btrfs_set_fs_incompat(fs_info, SIMPLE_QUOTA);
spin_unlock(&fs_info->qgroup_lock);
+ /* Skip rescan for simple qgroups. */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ goto out_free_path;
+
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
qgroup_rescan_zero_tracking(fs_info);
@@ -1222,6 +1293,39 @@ out:
else if (trans)
ret = btrfs_end_transaction(trans);
ulist_free(ulist);
+ kfree(prealloc);
+ return ret;
+}
+
+/*
+ * It is possible to have outstanding ordered extents which reserved bytes
+ * before we disabled quotas. We need to fully flush delalloc and ordered
+ * extents, then commit, to ensure that we don't leak such reservations, only
+ * to have them come back if we re-enable:
+ *
+ * - enable simple quotas
+ * - reserve space
+ * - release it, store rsv_bytes in OE
+ * - disable quotas
+ * - enable simple quotas (qgroup rsv are all 0)
+ * - OE finishes
+ * - run delayed refs
+ * - free rsv_bytes, resulting in miscounting or even underflow
+ */
+static int flush_reservations(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_trans_handle *trans;
+ int ret;
+
+ ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false);
+ if (ret)
+ return ret;
+ btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+ trans = btrfs_join_transaction(fs_info->tree_root);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ ret = btrfs_commit_transaction(trans);
+
return ret;
}
@@ -1269,6 +1373,10 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
+ ret = flush_reservations(fs_info);
+ if (ret)
+ goto out_unlock_cleaner;
+
/*
* 1 For the root item
*
@@ -1295,6 +1403,7 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE;
fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
spin_unlock(&fs_info->qgroup_lock);
@@ -1329,7 +1438,8 @@ out:
if (ret && trans)
btrfs_end_transaction(trans);
else if (trans)
- ret = btrfs_end_transaction(trans);
+ ret = btrfs_commit_transaction(trans);
+out_unlock_cleaner:
mutex_unlock(&fs_info->cleaner_mutex);
return ret;
@@ -1342,6 +1452,24 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}
+static void qgroup_iterator_add(struct list_head *head, struct btrfs_qgroup *qgroup)
+{
+ if (!list_empty(&qgroup->iterator))
+ return;
+
+ list_add_tail(&qgroup->iterator, head);
+}
+
+static void qgroup_iterator_clean(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct btrfs_qgroup *qgroup;
+
+ qgroup = list_first_entry(head, struct btrfs_qgroup, iterator);
+ list_del_init(&qgroup->iterator);
+ }
+}
+
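These two helpers replace the ulist-based walks below: instead of allocating
tracking nodes (GFP_ATOMIC, fallible) to remember which qgroups were visited,
each qgroup carries an embedded list_head whose emptiness doubles as the
"not yet queued" flag. The traversal pattern, condensed from
__qgroup_excl_accounting() below:

    LIST_HEAD(qgroup_list);
    struct btrfs_qgroup *cur;

    qgroup_iterator_add(&qgroup_list, qgroup);      /* seed the walk */
    list_for_each_entry(cur, &qgroup_list, iterator) {
            struct btrfs_qgroup_list *glist;

            /* ... account cur ... */

            /* Appending to the tail mid-iteration is safe here because
             * list_for_each_entry() just follows ->next and nothing is
             * removed until the walk is done.
             */
            list_for_each_entry(glist, &cur->groups, next_group)
                    qgroup_iterator_add(&qgroup_list, glist->group);
    }
    qgroup_iterator_clean(&qgroup_list);    /* reset every ->iterator */

The walk can no longer fail with -ENOMEM, which is what lets several callees
below drop their int return type.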
/*
* The easy accounting, we're updating qgroup relationship whose child qgroup
* only has exclusive extents.
@@ -1356,14 +1484,12 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
*
* Caller should hold fs_info->qgroup_lock.
*/
-static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
- struct ulist *tmp, u64 ref_root,
+static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info, u64 ref_root,
struct btrfs_qgroup *src, int sign)
{
struct btrfs_qgroup *qgroup;
- struct btrfs_qgroup_list *glist;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
+ struct btrfs_qgroup *cur;
+ LIST_HEAD(qgroup_list);
u64 num_bytes = src->excl;
int ret = 0;
@@ -1371,53 +1497,30 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
if (!qgroup)
goto out;
- qgroup->rfer += sign * num_bytes;
- qgroup->rfer_cmpr += sign * num_bytes;
-
- WARN_ON(sign < 0 && qgroup->excl < num_bytes);
- qgroup->excl += sign * num_bytes;
- qgroup->excl_cmpr += sign * num_bytes;
-
- if (sign > 0)
- qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
- else
- qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
-
- qgroup_dirty(fs_info, qgroup);
-
- /* Get all of the parent groups that contain this qgroup */
- list_for_each_entry(glist, &qgroup->groups, next_group) {
- ret = ulist_add(tmp, glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(cur, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
- /* Iterate all of the parents and adjust their reference counts */
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(tmp, &uiter))) {
- qgroup = unode_aux_to_qgroup(unode);
qgroup->rfer += sign * num_bytes;
qgroup->rfer_cmpr += sign * num_bytes;
+
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
qgroup->excl += sign * num_bytes;
+ qgroup->excl_cmpr += sign * num_bytes;
+
if (sign > 0)
qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
else
qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
- qgroup->excl_cmpr += sign * num_bytes;
qgroup_dirty(fs_info, qgroup);
- /* Add any parents of the parents */
- list_for_each_entry(glist, &qgroup->groups, next_group) {
- ret = ulist_add(tmp, glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ /* Append parent qgroups to @qgroup_list. */
+ list_for_each_entry(glist, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
ret = 0;
out:
+ qgroup_iterator_clean(&qgroup_list);
return ret;
}
@@ -1434,8 +1537,7 @@ out:
* Return < 0 for other error.
*/
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
- struct ulist *tmp, u64 src, u64 dst,
- int sign)
+ u64 src, u64 dst, int sign)
{
struct btrfs_qgroup *qgroup;
int ret = 1;
@@ -1446,8 +1548,7 @@ static int quick_update_accounting(struct btrfs_fs_info *fs_info,
goto out;
if (qgroup->excl == qgroup->rfer) {
ret = 0;
- err = __qgroup_excl_accounting(fs_info, tmp, dst,
- qgroup, sign);
+ err = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
if (err < 0) {
ret = err;
goto out;
@@ -1459,28 +1560,19 @@ out:
return ret;
}
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
- u64 dst)
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
- struct ulist *tmp;
- unsigned int nofs_flag;
+ struct btrfs_qgroup_list *prealloc = NULL;
int ret = 0;
/* Check the level of src and dst first */
if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
return -EINVAL;
- /* We hold a transaction handle open, must do a NOFS allocation. */
- nofs_flag = memalloc_nofs_save();
- tmp = ulist_alloc(GFP_KERNEL);
- memalloc_nofs_restore(nofs_flag);
- if (!tmp)
- return -ENOMEM;
-
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root) {
ret = -ENOTCONN;
@@ -1501,6 +1593,11 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
}
}
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = add_qgroup_relation_item(trans, src, dst);
if (ret)
goto out;
@@ -1512,16 +1609,17 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
}
spin_lock(&fs_info->qgroup_lock);
- ret = __add_relation_rb(member, parent);
+ ret = __add_relation_rb(prealloc, member, parent);
+ prealloc = NULL;
if (ret < 0) {
spin_unlock(&fs_info->qgroup_lock);
goto out;
}
- ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
+ ret = quick_update_accounting(fs_info, src, dst, 1);
spin_unlock(&fs_info->qgroup_lock);
out:
+ kfree(prealloc);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
- ulist_free(tmp);
return ret;
}
@@ -1532,19 +1630,10 @@ static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
struct btrfs_qgroup *parent;
struct btrfs_qgroup *member;
struct btrfs_qgroup_list *list;
- struct ulist *tmp;
bool found = false;
- unsigned int nofs_flag;
int ret = 0;
int ret2;
- /* We hold a transaction handle open, must do a NOFS allocation. */
- nofs_flag = memalloc_nofs_save();
- tmp = ulist_alloc(GFP_KERNEL);
- memalloc_nofs_restore(nofs_flag);
- if (!tmp)
- return -ENOMEM;
-
if (!fs_info->quota_root) {
ret = -ENOTCONN;
goto out;
@@ -1582,11 +1671,10 @@ delete_item:
if (found) {
spin_lock(&fs_info->qgroup_lock);
del_relation_rb(fs_info, src, dst);
- ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
+ ret = quick_update_accounting(fs_info, src, dst, -1);
spin_unlock(&fs_info->qgroup_lock);
}
out:
- ulist_free(tmp);
return ret;
}
@@ -1608,8 +1696,12 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup *prealloc = NULL;
int ret = 0;
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED)
+ return 0;
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root) {
ret = -ENOTCONN;
@@ -1622,21 +1714,25 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
goto out;
}
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = add_qgroup_item(trans, quota_root, qgroupid);
if (ret)
goto out;
spin_lock(&fs_info->qgroup_lock);
- qgroup = add_qgroup_rb(fs_info, qgroupid);
+ qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
+ prealloc = NULL;
- if (IS_ERR(qgroup)) {
- ret = PTR_ERR(qgroup);
- goto out;
- }
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ kfree(prealloc);
return ret;
}
@@ -1771,6 +1867,17 @@ out:
return ret;
}
+/*
+ * Inform qgroup to trace one dirty extent; its info is recorded in @record,
+ * so qgroup can account for it at transaction commit time.
+ *
+ * Lockless version: the caller must hold the delayed ref lock and have
+ * allocated the memory, then call btrfs_qgroup_trace_extent_post() after
+ * exiting the lock context.
+ *
+ * Return 0 on successful insertion.
+ * Return >0 for an existing record; the caller can free @record safely.
+ * An error is not possible.
+ */
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record)
@@ -1780,6 +1887,9 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_extent_record *entry;
u64 bytenr = record->bytenr;
+ if (!btrfs_qgroup_full_accounting(fs_info))
+ return 0;
+
lockdep_assert_held(&delayed_refs->lock);
trace_btrfs_qgroup_trace_extent(fs_info, record);
@@ -1806,12 +1916,35 @@ int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
return 0;
}
+/*
+ * Post handler after qgroup_trace_extent_nolock().
+ *
+ * NOTE: The current qgroup design does the expensive backref walk at
+ * transaction commit time with TRANS_STATE_COMMIT_DOING, which blocks new
+ * incoming transactions. This is designed so that btrfs_find_all_roots()
+ * gets a correct new_roots result.
+ *
+ * However, for old_roots there is no need to do the backref walk at that
+ * time, since we search commit roots and the result will always be correct.
+ *
+ * Due to the lockless nature of btrfs_qgroup_trace_extent_nolock(), we can't
+ * do the backref walk there, so we must call
+ * btrfs_qgroup_trace_extent_post() after exiting the spinlock context.
+ *
+ * TODO: If we can fix btrfs_find_all_roots() and prove that it gets a correct
+ * result using the current root, then we can move the expensive backref walk
+ * out of transaction commit; but not now, as qgroup accounting would be wrong
+ * again.
+ */
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord)
{
struct btrfs_backref_walk_ctx ctx = { 0 };
int ret;
+ if (!btrfs_qgroup_full_accounting(trans->fs_info))
+ return 0;
/*
* We are always called in a context where we are already holding a
* transaction handle. Often we are called when adding a data delayed
@@ -1859,6 +1992,19 @@ int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
return 0;
}
+/*
+ * Inform qgroup to trace one dirty extent, specified by @bytenr and
+ * @num_bytes, so qgroup can account for it at transaction commit time.
+ *
+ * This is the better encapsulated version, with memory allocation and the
+ * backref walk for commit roots, so it can sleep.
+ *
+ * Return 0 if the operation is done.
+ * Return <0 for errors, like memory allocation failure or an invalid
+ * parameter (NULL trans).
+ */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes)
{
@@ -1867,8 +2013,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
- || bytenr == 0 || num_bytes == 0)
+ if (!btrfs_qgroup_full_accounting(fs_info) || bytenr == 0 || num_bytes == 0)
return 0;
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record)
@@ -1889,6 +2034,12 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
return btrfs_qgroup_trace_extent_post(trans, record);
}
+/*
+ * Inform qgroup to trace all leaf items of data.
+ *
+ * Return 0 for success.
+ * Return <0 for error (ENOMEM).
+ */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct extent_buffer *eb)
{
@@ -1900,7 +2051,7 @@ int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
u64 bytenr, num_bytes;
/* We can be called directly from walk_up_proc() */
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
for (i = 0; i < nr; i++) {
@@ -2276,7 +2427,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
int level;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
/* Wrong parameter order */
@@ -2319,6 +2470,16 @@ out:
return ret;
}
+/*
+ * Inform qgroup to trace a whole subtree, including all its child tree
+ * blocks and data.
+ * The root tree block is specified by @root_eb.
+ *
+ * Normally used by relocation (tree block swap) and subvolume deletion.
+ *
+ * Return 0 for success.
+ * Return <0 for error (ENOMEM or tree search error).
+ */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
@@ -2333,7 +2494,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
BUG_ON(root_eb == NULL);
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
spin_lock(&fs_info->qgroup_lock);
@@ -2445,62 +2606,64 @@ out:
return ret;
}
+static void qgroup_iterator_nested_add(struct list_head *head, struct btrfs_qgroup *qgroup)
+{
+ if (!list_empty(&qgroup->nested_iterator))
+ return;
+
+ list_add_tail(&qgroup->nested_iterator, head);
+}
+
+static void qgroup_iterator_nested_clean(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct btrfs_qgroup *qgroup;
+
+ qgroup = list_first_entry(head, struct btrfs_qgroup, nested_iterator);
+ list_del_init(&qgroup->nested_iterator);
+ }
+}
+
#define UPDATE_NEW 0
#define UPDATE_OLD 1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
*/
-static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
- struct ulist *roots, struct ulist *tmp,
- struct ulist *qgroups, u64 seq, int update_old)
+static void qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
+ struct ulist *roots, struct list_head *qgroups,
+ u64 seq, int update_old)
{
struct ulist_node *unode;
struct ulist_iterator uiter;
- struct ulist_node *tmp_unode;
- struct ulist_iterator tmp_uiter;
struct btrfs_qgroup *qg;
- int ret = 0;
if (!roots)
- return 0;
+ return;
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(roots, &uiter))) {
+ LIST_HEAD(tmp);
+
qg = find_qgroup_rb(fs_info, unode->val);
if (!qg)
continue;
- ulist_reinit(tmp);
- ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
- GFP_ATOMIC);
- if (ret < 0)
- return ret;
- ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
- if (ret < 0)
- return ret;
- ULIST_ITER_INIT(&tmp_uiter);
- while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ qgroup_iterator_nested_add(qgroups, qg);
+ qgroup_iterator_add(&tmp, qg);
+ list_for_each_entry(qg, &tmp, iterator) {
struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(tmp_unode);
if (update_old)
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
else
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
+
list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(qgroups, glist->group->qgroupid,
- qgroup_to_aux(glist->group),
- GFP_ATOMIC);
- if (ret < 0)
- return ret;
- ret = ulist_add(tmp, glist->group->qgroupid,
- qgroup_to_aux(glist->group),
- GFP_ATOMIC);
- if (ret < 0)
- return ret;
+ qgroup_iterator_nested_add(qgroups, glist->group);
+ qgroup_iterator_add(&tmp, glist->group);
}
}
+ qgroup_iterator_clean(&tmp);
}
- return 0;
}
/*
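qgroup_update_refcnt() above is where the second linkage earns its keep: @tmp
(linked through ->iterator) performs the upward walk from each root's qgroup,
while @qgroups (linked through ->nested_iterator) accumulates every qgroup
touched across all roots for qgroup_update_counters() to consume. A qgroup can
sit on both lists at once only because the fields are distinct; a sketch of
the assumed struct additions:

    struct btrfs_qgroup {
            /* ... */
            struct list_head iterator;              /* per-walk scratch list */
            struct list_head nested_iterator;       /* accumulates across walks */
            /* ... */
    };

Both are initialized via INIT_LIST_HEAD() in add_qgroup_rb() above, so
list_empty() is a valid "not queued" test from the moment a qgroup exists.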
@@ -2539,22 +2702,16 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
* But this time we don't need to consider other things, the codes and logic
* is easy to understand now.
*/
-static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
- struct ulist *qgroups,
- u64 nr_old_roots,
- u64 nr_new_roots,
- u64 num_bytes, u64 seq)
+static void qgroup_update_counters(struct btrfs_fs_info *fs_info,
+ struct list_head *qgroups, u64 nr_old_roots,
+ u64 nr_new_roots, u64 num_bytes, u64 seq)
{
- struct ulist_node *unode;
- struct ulist_iterator uiter;
struct btrfs_qgroup *qg;
- u64 cur_new_count, cur_old_count;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(qgroups, &uiter))) {
+ list_for_each_entry(qg, qgroups, nested_iterator) {
+ u64 cur_new_count, cur_old_count;
bool dirty = false;
- qg = unode_aux_to_qgroup(unode);
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
@@ -2625,7 +2782,6 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
if (dirty)
qgroup_dirty(fs_info, qg);
}
- return 0;
}
/*
@@ -2662,8 +2818,7 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
struct ulist *new_roots)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct ulist *qgroups = NULL;
- struct ulist *tmp = NULL;
+ LIST_HEAD(qgroups);
u64 seq;
u64 nr_new_roots = 0;
u64 nr_old_roots = 0;
@@ -2673,7 +2828,7 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
* If quotas get disabled meanwhile, the resources need to be freed and
* we can't just exit here.
*/
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+ if (!btrfs_qgroup_full_accounting(fs_info) ||
fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)
goto out_free;
@@ -2697,17 +2852,6 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
num_bytes, nr_old_roots, nr_new_roots);
- qgroups = ulist_alloc(GFP_NOFS);
- if (!qgroups) {
- ret = -ENOMEM;
- goto out_free;
- }
- tmp = ulist_alloc(GFP_NOFS);
- if (!tmp) {
- ret = -ENOMEM;
- goto out_free;
- }
-
mutex_lock(&fs_info->qgroup_rescan_lock);
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
@@ -2722,29 +2866,21 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
seq = fs_info->qgroup_seq;
/* Update old refcnts using old_roots */
- ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
- UPDATE_OLD);
- if (ret < 0)
- goto out;
+ qgroup_update_refcnt(fs_info, old_roots, &qgroups, seq, UPDATE_OLD);
/* Update new refcnts using new_roots */
- ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
- UPDATE_NEW);
- if (ret < 0)
- goto out;
+ qgroup_update_refcnt(fs_info, new_roots, &qgroups, seq, UPDATE_NEW);
- qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
+ qgroup_update_counters(fs_info, &qgroups, nr_old_roots, nr_new_roots,
num_bytes, seq);
/*
* Bump qgroup_seq to avoid seq overlap
*/
fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
-out:
spin_unlock(&fs_info->qgroup_lock);
out_free:
- ulist_free(tmp);
- ulist_free(qgroups);
+ qgroup_iterator_nested_clean(&qgroups);
ulist_free(old_roots);
ulist_free(new_roots);
return ret;
@@ -2761,6 +2897,9 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
u64 qgroup_to_skip;
int ret = 0;
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ return 0;
+
delayed_refs = &trans->transaction->delayed_refs;
qgroup_to_skip = delayed_refs->qgroup_to_skip;
while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
@@ -2876,7 +3015,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
qgroup_mark_inconsistent(fs_info);
spin_lock(&fs_info->qgroup_lock);
}
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (btrfs_qgroup_enabled(fs_info))
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
else
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
@@ -2889,6 +3028,47 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
return ret;
}
+static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
+ u64 inode_rootid,
+ struct btrfs_qgroup_inherit **inherit)
+{
+ int i = 0;
+ u64 num_qgroups = 0;
+ struct btrfs_qgroup *inode_qg;
+ struct btrfs_qgroup_list *qg_list;
+ struct btrfs_qgroup_inherit *res;
+ size_t struct_sz;
+ u64 *qgids;
+
+ if (*inherit)
+ return -EEXIST;
+
+ inode_qg = find_qgroup_rb(fs_info, inode_rootid);
+ if (!inode_qg)
+ return -ENOENT;
+
+ num_qgroups = list_count_nodes(&inode_qg->groups);
+
+ if (!num_qgroups)
+ return 0;
+
+ struct_sz = struct_size(res, qgroups, num_qgroups);
+ if (struct_sz == SIZE_MAX)
+ return -ERANGE;
+
+ res = kzalloc(struct_sz, GFP_NOFS);
+ if (!res)
+ return -ENOMEM;
+ res->num_qgroups = num_qgroups;
+ qgids = res->qgroups;
+
+ list_for_each_entry(qg_list, &inode_qg->groups, next_group)
+ qgids[i++] = qg_list->group->qgroupid;
+
+ *inherit = res;
+ return 0;
+}
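
The helper above sizes a flexible-array allocation with struct_size(), which saturates to SIZE_MAX on overflow so that the -ERANGE check rejects impossibly large parent lists before kzalloc() runs. A minimal userspace sketch of the same pattern (names here are illustrative, not btrfs code):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct inherit_sketch {
	uint64_t num_qgroups;
	uint64_t qgroups[];		/* flexible array member */
};

/* struct_size() analogue: saturate to SIZE_MAX on overflow. */
static size_t sketch_struct_size(size_t n)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, sizeof(uint64_t), &bytes) ||
	    __builtin_add_overflow(bytes, sizeof(struct inherit_sketch), &bytes))
		return SIZE_MAX;
	return bytes;
}

static struct inherit_sketch *sketch_alloc(size_t num_qgroups)
{
	size_t sz = sketch_struct_size(num_qgroups);

	if (sz == SIZE_MAX)
		return NULL;	/* the kernel code returns -ERANGE here */
	return calloc(1, sz);	/* kzalloc(..., GFP_NOFS) analogue */
}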
+
/*
* Copy the accounting information between qgroups. This is necessary
* when a snapshot or a subvolume is created. Throwing an error will
@@ -2896,7 +3076,8 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
* when a readonly fs is a reasonable outcome.
*/
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
- u64 objectid, struct btrfs_qgroup_inherit *inherit)
+ u64 objectid, u64 inode_rootid,
+ struct btrfs_qgroup_inherit *inherit)
{
int ret = 0;
int i;
@@ -2906,10 +3087,17 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
+ struct btrfs_qgroup *prealloc;
+ struct btrfs_qgroup_list **qlist_prealloc = NULL;
+ bool free_inherit = false;
bool need_rescan = false;
u32 level_size = 0;
u64 nums;
+ prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
+ if (!prealloc)
+ return -ENOMEM;
+
/*
* There are only two callers of this function.
*
@@ -2929,7 +3117,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
if (!committing)
mutex_lock(&fs_info->qgroup_ioctl_lock);
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_enabled(fs_info))
goto out;
quota_root = fs_info->quota_root;
@@ -2938,6 +3126,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto out;
}
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && !inherit) {
+ ret = qgroup_auto_inherit(fs_info, inode_rootid, &inherit);
+ if (ret)
+ goto out;
+ free_inherit = true;
+ }
+
if (inherit) {
i_qgroups = (u64 *)(inherit + 1);
nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
@@ -2982,16 +3177,28 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto out;
}
ret = 0;
- }
+ qlist_prealloc = kcalloc(inherit->num_qgroups,
+ sizeof(struct btrfs_qgroup_list *),
+ GFP_NOFS);
+ if (!qlist_prealloc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (int i = 0; i < inherit->num_qgroups; i++) {
+ qlist_prealloc[i] = kzalloc(sizeof(struct btrfs_qgroup_list),
+ GFP_NOFS);
+ if (!qlist_prealloc[i]) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ }
spin_lock(&fs_info->qgroup_lock);
- dstgroup = add_qgroup_rb(fs_info, objectid);
- if (IS_ERR(dstgroup)) {
- ret = PTR_ERR(dstgroup);
- goto unlock;
- }
+ dstgroup = add_qgroup_rb(fs_info, prealloc, objectid);
+ prealloc = NULL;
if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
dstgroup->lim_flags = inherit->lim.flags;
@@ -3003,7 +3210,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
qgroup_dirty(fs_info, dstgroup);
}
- if (srcid) {
+ if (srcid && btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL) {
srcgroup = find_qgroup_rb(fs_info, srcid);
if (!srcgroup)
goto unlock;
@@ -3038,7 +3245,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) {
if (*i_qgroups) {
- ret = add_relation_rb(fs_info, objectid, *i_qgroups);
+ ret = add_relation_rb(fs_info, qlist_prealloc[i], objectid,
+ *i_qgroups);
+ qlist_prealloc[i] = NULL;
if (ret)
goto unlock;
}
@@ -3102,6 +3311,14 @@ out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
if (need_rescan)
qgroup_mark_inconsistent(fs_info);
+ if (qlist_prealloc) {
+ for (int i = 0; i < inherit->num_qgroups; i++)
+ kfree(qlist_prealloc[i]);
+ kfree(qlist_prealloc);
+ }
+ if (free_inherit)
+ kfree(inherit);
+ kfree(prealloc);
return ret;
}
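
The allocation changes in this function follow a common kernel pattern: fs_info->qgroup_lock is a spinlock, so the qgroup and relation-list nodes are now allocated with a sleeping allocator before the lock is taken, handed off under the lock by NULL-ing the local pointer, and any unused preallocation is freed unconditionally on exit. A condensed sketch of that pattern, with hypothetical names and in kernel-header context:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_lock);

struct sketch_node { struct list_head list; };

static int sketch_prealloc_pattern(struct list_head *head)
{
	struct sketch_node *prealloc;

	/* Sleeping allocation is fine here, outside the spinlock. */
	prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	if (!prealloc)
		return -ENOMEM;

	spin_lock(&sketch_lock);
	list_add_tail(&prealloc->list, head);
	prealloc = NULL;		/* ownership transferred */
	spin_unlock(&sketch_lock);

	kfree(prealloc);		/* no-op when handed off */
	return 0;
}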
@@ -3125,8 +3342,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
struct btrfs_fs_info *fs_info = root->fs_info;
u64 ref_root = root->root_key.objectid;
int ret = 0;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
+ LIST_HEAD(qgroup_list);
if (!is_fstree(ref_root))
return 0;
@@ -3146,49 +3362,28 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
if (!qgroup)
goto out;
- /*
- * in a first step, we check all affected qgroups if any limits would
- * be exceeded
- */
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- qgroup_to_aux(qgroup), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(unode);
-
- if (enforce && !qgroup_check_limits(qg, num_bytes)) {
+ if (enforce && !qgroup_check_limits(qgroup, num_bytes)) {
ret = -EDQUOT;
goto out;
}
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+ list_for_each_entry(glist, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
+
ret = 0;
/*
* no limits exceeded, now record the reservation into all qgroups
*/
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
-
- qg = unode_aux_to_qgroup(unode);
-
- qgroup_rsv_add(fs_info, qg, num_bytes, type);
- }
+ list_for_each_entry(qgroup, &qgroup_list, iterator)
+ qgroup_rsv_add(fs_info, qgroup, num_bytes, type);
out:
+ qgroup_iterator_clean(&qgroup_list);
spin_unlock(&fs_info->qgroup_lock);
return ret;
}
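
As with the nested variant earlier, the rewritten reserve walk relies on two small helpers that thread the traversal through the new btrfs_qgroup::iterator list head instead of a ulist, so visiting the parent hierarchy needs no allocation and cannot fail while the spinlock is held. Their bodies live elsewhere in qgroup.c; the expected shape is roughly:

/* Add a qgroup once per walk; the list is intrusive, so this is O(1). */
static void qgroup_iterator_add(struct list_head *head,
				struct btrfs_qgroup *qgroup)
{
	if (!list_empty(&qgroup->iterator))
		return;		/* already queued on this walk */
	list_add_tail(&qgroup->iterator, head);
}

static void qgroup_iterator_clean(struct list_head *head)
{
	while (!list_empty(head))
		list_del_init(head->next);
}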
@@ -3207,9 +3402,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
enum btrfs_qgroup_rsv_type type)
{
struct btrfs_qgroup *qgroup;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
- int ret = 0;
+ LIST_HEAD(qgroup_list);
if (!is_fstree(ref_root))
return;
@@ -3237,30 +3430,17 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
*/
num_bytes = qgroup->rsv.values[type];
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- qgroup_to_aux(qgroup), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(unode);
-
- qgroup_rsv_release(fs_info, qg, num_bytes, type);
-
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
+ qgroup_rsv_release(fs_info, qgroup, num_bytes, type);
+ list_for_each_entry(glist, &qgroup->groups, next_group) {
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
}
-
out:
+ qgroup_iterator_clean(&qgroup_list);
spin_unlock(&fs_info->qgroup_lock);
}
@@ -3295,6 +3475,9 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
int slot;
int ret;
+ if (!btrfs_qgroup_full_accounting(fs_info))
+ return 1;
+
mutex_lock(&fs_info->qgroup_rescan_lock);
extent_root = btrfs_extent_root(fs_info,
fs_info->qgroup_rescan_progress.objectid);
@@ -3375,10 +3558,15 @@ out:
static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
{
- return btrfs_fs_closing(fs_info) ||
- test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state) ||
- !test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
- fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN;
+ if (btrfs_fs_closing(fs_info))
+ return true;
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+ return true;
+ if (!btrfs_qgroup_enabled(fs_info))
+ return true;
+ if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
+ return true;
+ return false;
}
static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
@@ -3392,6 +3580,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
bool stopped = false;
bool did_leaf_rescans = false;
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ return;
+
path = btrfs_alloc_path();
if (!path)
goto out;
@@ -3495,6 +3686,11 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
{
int ret = 0;
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
+ btrfs_warn(fs_info, "qgroup rescan init failed, running in simple mode");
+ return -EINVAL;
+ }
+
if (!init_flags) {
/* we're resuming qgroup rescan at mount time */
if (!(fs_info->qgroup_flags &
@@ -3525,7 +3721,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
ret = -EINVAL;
- } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+ } else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
/* Quota disable is in progress */
ret = -EBUSY;
}
@@ -3546,7 +3742,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
mutex_unlock(&fs_info->qgroup_rescan_lock);
btrfs_init_work(&fs_info->qgroup_rescan_work,
- btrfs_qgroup_rescan_worker, NULL, NULL);
+ btrfs_qgroup_rescan_worker, NULL);
return 0;
}
@@ -3784,7 +3980,7 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
u64 to_reserve;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+ if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
!is_fstree(root->root_key.objectid) || len == 0)
return 0;
@@ -3916,8 +4112,12 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
int trace_op = QGROUP_RELEASE;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
- return 0;
+ if (btrfs_qgroup_mode(inode->root->fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
+ extent_changeset_init(&changeset);
+ return clear_record_extent_bits(&inode->io_tree, start,
+ start + len - 1,
+ EXTENT_QGROUP_RESERVED, &changeset);
+ }
/* In release case, we shouldn't have @reserved */
WARN_ON(!free && reserved);
@@ -4027,7 +4227,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
!is_fstree(root->root_key.objectid) || num_bytes == 0)
return 0;
@@ -4064,11 +4264,15 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
}
+/*
+ * Per-transaction meta reservation should be all freed at transaction commit
+ * time
+ */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
!is_fstree(root->root_key.objectid))
return;
@@ -4084,7 +4288,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
!is_fstree(root->root_key.objectid))
return;
@@ -4104,9 +4308,7 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
int num_bytes)
{
struct btrfs_qgroup *qgroup;
- struct ulist_node *unode;
- struct ulist_iterator uiter;
- int ret = 0;
+ LIST_HEAD(qgroup_list);
if (num_bytes == 0)
return;
@@ -4117,39 +4319,35 @@ static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
qgroup = find_qgroup_rb(fs_info, ref_root);
if (!qgroup)
goto out;
- ulist_reinit(fs_info->qgroup_ulist);
- ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
- qgroup_to_aux(qgroup), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- ULIST_ITER_INIT(&uiter);
- while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
- struct btrfs_qgroup *qg;
- struct btrfs_qgroup_list *glist;
- qg = unode_aux_to_qgroup(unode);
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qgroup, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
- qgroup_rsv_release(fs_info, qg, num_bytes,
+ qgroup_rsv_release(fs_info, qgroup, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
- qgroup_rsv_add(fs_info, qg, num_bytes,
+ qgroup_rsv_add(fs_info, qgroup, num_bytes,
BTRFS_QGROUP_RSV_META_PERTRANS);
- list_for_each_entry(glist, &qg->groups, next_group) {
- ret = ulist_add(fs_info->qgroup_ulist,
- glist->group->qgroupid,
- qgroup_to_aux(glist->group), GFP_ATOMIC);
- if (ret < 0)
- goto out;
- }
+
+ list_for_each_entry(glist, &qgroup->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
}
out:
+ qgroup_iterator_clean(&qgroup_list);
spin_unlock(&fs_info->qgroup_lock);
}
+/*
+ * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
+ *
+ * This is called when preallocated meta reservation needs to be used.
+ * Normally after btrfs_join_transaction() call.
+ */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
!is_fstree(root->root_key.objectid))
return;
/* Same as btrfs_qgroup_free_meta_prealloc() */
@@ -4257,7 +4455,7 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
int level = btrfs_header_level(subvol_parent) - 1;
int ret = 0;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
@@ -4367,7 +4565,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
int ret = 0;
int i;
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
return 0;
@@ -4450,3 +4648,53 @@ void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
}
*root = RB_ROOT;
}
+
+int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
+ struct btrfs_squota_delta *delta)
+{
+ int ret;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup *qg;
+ LIST_HEAD(qgroup_list);
+ u64 root = delta->root;
+ u64 num_bytes = delta->num_bytes;
+ const int sign = (delta->is_inc ? 1 : -1);
+
+ if (btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_SIMPLE)
+ return 0;
+
+ if (!is_fstree(root))
+ return 0;
+
+ /* If the extent predates enabling quotas, don't count it. */
+ if (delta->generation < fs_info->qgroup_enable_gen)
+ return 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ qgroup = find_qgroup_rb(fs_info, root);
+ if (!qgroup) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = 0;
+ qgroup_iterator_add(&qgroup_list, qgroup);
+ list_for_each_entry(qg, &qgroup_list, iterator) {
+ struct btrfs_qgroup_list *glist;
+
+ qg->excl += num_bytes * sign;
+ qg->rfer += num_bytes * sign;
+ qgroup_dirty(fs_info, qg);
+
+ list_for_each_entry(glist, &qg->groups, next_group)
+ qgroup_iterator_add(&qgroup_list, glist->group);
+ }
+ qgroup_iterator_clean(&qgroup_list);
+
+out:
+ spin_unlock(&fs_info->qgroup_lock);
+ if (!ret && delta->rsv_bytes)
+ btrfs_qgroup_free_refroot(fs_info, root, delta->rsv_bytes,
+ BTRFS_QGROUP_RSV_DATA);
+ return ret;
+}
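
Callers in the extent allocation and free paths are expected to fill a btrfs_squota_delta and hand it to this function. A hedged example of what an allocation-side caller could look like (sketch_record_alloc and its arguments are illustrative, not part of the patch):

static int sketch_record_alloc(struct btrfs_trans_handle *trans,
			       u64 root_id, u64 extent_bytes)
{
	struct btrfs_squota_delta delta = {
		.root		= root_id,
		.num_bytes	= extent_bytes,
		.rsv_bytes	= 0,
		.generation	= trans->transid,
		.is_inc		= true,	/* allocating, not freeing */
		.is_data	= true,
	};

	return btrfs_record_squota_delta(trans->fs_info, &delta);
}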
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 7bffa10589d6..855a4f978761 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -101,8 +101,15 @@
* subtree rescan for them.
*/
-#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN (1UL << 3)
-#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING (1UL << 4)
+/*
+ * These flags share the flags field of the btrfs_qgroup_status_item with the
+ * persisted flags defined in btrfs_tree.h.
+ *
+ * To minimize the chance of collision with new persisted status flags, these
+ * count backwards from the MSB.
+ */
+#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN (1ULL << 63)
+#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING (1ULL << 62)
/*
* Record a dirty extent, and inform qgroup to update quota on it
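
Because the runtime bits now share one 64-bit flags word with the persisted status bits, counting down from the MSB is only safe while the two sets stay disjoint. An illustrative compile-time guard, not in the patch, assuming BTRFS_QGROUP_STATUS_FLAGS_MASK in btrfs_tree.h covers every persisted flag (static_assert comes from linux/build_bug.h):

static_assert((BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN &
	       BTRFS_QGROUP_STATUS_FLAGS_MASK) == 0);
static_assert((BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING &
	       BTRFS_QGROUP_STATUS_FLAGS_MASK) == 0);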
@@ -220,6 +227,33 @@ struct btrfs_qgroup {
struct list_head groups; /* groups this group is member of */
struct list_head members; /* groups that are members of this group */
struct list_head dirty; /* dirty groups */
+
+ /*
+ * For qgroup iteration usage.
+ *
+ * The iteration list should always be empty until qgroup_iterator_add()
+ * is called, and it should be reset to empty after the iteration has
+ * finished.
+ */
+ struct list_head iterator;
+
+ /*
+ * For nested iterator usage.
+ *
+ * Here we support at most one level of nested iterator calls like:
+ *
+ * LIST_HEAD(all_qgroups);
+ * {
+ * LIST_HEAD(local_qgroups);
+ * qgroup_iterator_add(local_qgroups, qg);
+ * qgroup_iterator_nested_add(all_qgroups, qg);
+ * do_some_work(local_qgroups);
+ * qgroup_iterator_clean(local_qgroups);
+ * }
+ * do_some_work(all_qgroups);
+ * qgroup_iterator_nested_clean(all_qgroups);
+ */
+ struct list_head nested_iterator;
struct rb_node node; /* tree of qgroups */
/*
@@ -235,6 +269,21 @@ struct btrfs_qgroup {
struct kobject kobj;
};
+struct btrfs_squota_delta {
+ /* The fstree root this delta counts against. */
+ u64 root;
+ /* The number of bytes in the extent being counted. */
+ u64 num_bytes;
+ /* The number of bytes reserved for this extent. */
+ u64 rsv_bytes;
+ /* The generation the extent was created in. */
+ u64 generation;
+ /* Whether we are using or freeing the extent. */
+ bool is_inc;
+ /* Whether the extent is data or metadata. */
+ bool is_data;
+};
+
static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
@@ -249,14 +298,23 @@ enum {
ENUM_BIT(QGROUP_FREE),
};
-int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
+enum btrfs_qgroup_mode {
+ BTRFS_QGROUP_MODE_DISABLED,
+ BTRFS_QGROUP_MODE_FULL,
+ BTRFS_QGROUP_MODE_SIMPLE
+};
+
+enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
+bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
+bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
+int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
+ struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
bool interruptible);
-int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
- u64 dst);
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
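
The two new predicates split call sites by intent: btrfs_qgroup_enabled() gates anything that must run in either quota mode, while btrfs_qgroup_full_accounting() guards the expensive backref-walking paths that simple quotas skip. Their bodies are defined in qgroup.c outside this fragment; the expected shape is roughly:

bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) != BTRFS_QGROUP_MODE_DISABLED;
}

bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info)
{
	return btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL;
}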
@@ -267,80 +325,16 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;
-/*
- * Inform qgroup to trace one dirty extent, its info is recorded in @record.
- * So qgroup can account it at transaction committing time.
- *
- * No lock version, caller must acquire delayed ref lock and allocated memory,
- * then call btrfs_qgroup_trace_extent_post() after exiting lock context.
- *
- * Return 0 for success insert
- * Return >0 for existing record, caller can free @record safely.
- * Error is not possible
- */
int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record);
-
-/*
- * Post handler after qgroup_trace_extent_nolock().
- *
- * NOTE: Current qgroup does the expensive backref walk at transaction
- * committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming
- * new transaction.
- * This is designed to allow btrfs_find_all_roots() to get correct new_roots
- * result.
- *
- * However for old_roots there is no need to do backref walk at that time,
- * since we search commit roots to walk backref and result will always be
- * correct.
- *
- * Due to the nature of no lock version, we can't do backref there.
- * So we must call btrfs_qgroup_trace_extent_post() after exiting
- * spinlock context.
- *
- * TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
- * using current root, then we can move all expensive backref walk out of
- * transaction committing, but not now as qgroup accounting will be wrong again.
- */
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord);
-
-/*
- * Inform qgroup to trace one dirty extent, specified by @bytenr and
- * @num_bytes.
- * So qgroup can account it at commit trans time.
- *
- * Better encapsulated version, with memory allocation and backref walk for
- * commit roots.
- * So this can sleep.
- *
- * Return 0 if the operation is done.
- * Return <0 for error, like memory allocation failure or invalid parameter
- * (NULL trans)
- */
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes);
-
-/*
- * Inform qgroup to trace all leaf items of data
- *
- * Return 0 for success
- * Return <0 for error(ENOMEM)
- */
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct extent_buffer *eb);
-/*
- * Inform qgroup to trace a whole subtree, including all its child tree
- * blocks and data.
- * The root tree block is specified by @root_eb.
- *
- * Normally used by relocation(tree block swap) and subvolume deletion.
- *
- * Return 0 for success
- * Return <0 for error(ENOMEM or tree search error)
- */
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
@@ -350,7 +344,8 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
- u64 objectid, struct btrfs_qgroup_inherit *inherit);
+ u64 objectid, u64 inode_rootid,
+ struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type);
@@ -408,20 +403,8 @@ static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
BTRFS_QGROUP_RSV_META_PREALLOC);
}
-/*
- * Per-transaction meta reservation should be all freed at transaction commit
- * time
- */
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
-
-/*
- * Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
- *
- * This is called when preallocated meta reservation needs to be used.
- * Normally after btrfs_join_transaction() call.
- */
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
-
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);
/* btrfs_qgroup_swapped_blocks related functions */
@@ -439,5 +422,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
+int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
+ struct btrfs_squota_delta *delta);
#endif
diff --git a/fs/btrfs/raid-stripe-tree.c b/fs/btrfs/raid-stripe-tree.c
new file mode 100644
index 000000000000..944e8f1862aa
--- /dev/null
+++ b/fs/btrfs/raid-stripe-tree.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/btrfs_tree.h>
+#include "ctree.h"
+#include "fs.h"
+#include "accessors.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "raid-stripe-tree.h"
+#include "volumes.h"
+#include "misc.h"
+#include "print-tree.h"
+
+int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *stripe_root = fs_info->stripe_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ u64 found_start;
+ u64 found_end;
+ u64 end = start + length;
+ int slot;
+ int ret;
+
+ if (!stripe_root)
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ key.objectid = start;
+ key.type = BTRFS_RAID_STRIPE_KEY;
+ key.offset = length;
+
+ ret = btrfs_search_slot(trans, stripe_root, &key, path, -1, 1);
+ if (ret < 0)
+ break;
+ if (ret > 0) {
+ ret = 0;
+ if (path->slots[0] == 0)
+ break;
+ path->slots[0]--;
+ }
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ found_start = key.objectid;
+ found_end = found_start + key.offset;
+
+ /* That stripe ends before we start, we're done. */
+ if (found_end <= start)
+ break;
+
+ trace_btrfs_raid_extent_delete(fs_info, start, end,
+ found_start, found_end);
+
+ ASSERT(found_start >= start && found_end <= end);
+ ret = btrfs_del_item(trans, stripe_root, path);
+ if (ret)
+ break;
+
+ btrfs_release_path(path);
+ }
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_io_context *bioc)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_key stripe_key;
+ struct btrfs_root *stripe_root = fs_info->stripe_root;
+ const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
+ u8 encoding = btrfs_bg_flags_to_raid_index(bioc->map_type);
+ struct btrfs_stripe_extent *stripe_extent;
+ const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
+ int ret;
+
+ stripe_extent = kzalloc(item_size, GFP_NOFS);
+ if (!stripe_extent) {
+ btrfs_abort_transaction(trans, -ENOMEM);
+ btrfs_end_transaction(trans);
+ return -ENOMEM;
+ }
+
+ trace_btrfs_insert_one_raid_extent(fs_info, bioc->logical, bioc->size,
+ num_stripes);
+ btrfs_set_stack_stripe_extent_encoding(stripe_extent, encoding);
+ for (int i = 0; i < num_stripes; i++) {
+ u64 devid = bioc->stripes[i].dev->devid;
+ u64 physical = bioc->stripes[i].physical;
+ u64 length = bioc->stripes[i].length;
+ struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
+
+ if (length == 0)
+ length = bioc->size;
+
+ btrfs_set_stack_raid_stride_devid(raid_stride, devid);
+ btrfs_set_stack_raid_stride_physical(raid_stride, physical);
+ }
+
+ stripe_key.objectid = bioc->logical;
+ stripe_key.type = BTRFS_RAID_STRIPE_KEY;
+ stripe_key.offset = bioc->size;
+
+ ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
+ item_size);
+ if (ret)
+ btrfs_abort_transaction(trans, ret);
+
+ kfree(stripe_extent);
+
+ return ret;
+}
+
+int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_extent *ordered_extent)
+{
+ struct btrfs_io_context *bioc;
+ int ret = 0;
+
+ if (!btrfs_fs_incompat(trans->fs_info, RAID_STRIPE_TREE))
+ return 0;
+
+ list_for_each_entry(bioc, &ordered_extent->bioc_list, rst_ordered_entry) {
+ ret = btrfs_insert_one_raid_extent(trans, bioc);
+ if (ret)
+ return ret;
+ }
+
+ while (!list_empty(&ordered_extent->bioc_list)) {
+ bioc = list_first_entry(&ordered_extent->bioc_list,
+ typeof(*bioc), rst_ordered_entry);
+ list_del(&bioc->rst_ordered_entry);
+ btrfs_put_bioc(bioc);
+ }
+
+ return ret;
+}
+
+int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 *length, u64 map_type,
+ u32 stripe_index, struct btrfs_io_stripe *stripe)
+{
+ struct btrfs_root *stripe_root = fs_info->stripe_root;
+ struct btrfs_stripe_extent *stripe_extent;
+ struct btrfs_key stripe_key;
+ struct btrfs_key found_key;
+ struct btrfs_path *path;
+ struct extent_buffer *leaf;
+ const u64 end = logical + *length;
+ int num_stripes;
+ u8 encoding;
+ u64 offset;
+ u64 found_logical;
+ u64 found_length;
+ u64 found_end;
+ int slot;
+ int ret;
+
+ stripe_key.objectid = logical;
+ stripe_key.type = BTRFS_RAID_STRIPE_KEY;
+ stripe_key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ if (stripe->is_scrub) {
+ path->skip_locking = 1;
+ path->search_commit_root = 1;
+ }
+
+ ret = btrfs_search_slot(NULL, stripe_root, &stripe_key, path, 0, 0);
+ if (ret < 0)
+ goto free_path;
+ if (ret) {
+ if (path->slots[0] != 0)
+ path->slots[0]--;
+ }
+
+ while (1) {
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ btrfs_item_key_to_cpu(leaf, &found_key, slot);
+ found_logical = found_key.objectid;
+ found_length = found_key.offset;
+ found_end = found_logical + found_length;
+
+ if (found_logical > end) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (in_range(logical, found_logical, found_length))
+ break;
+
+ ret = btrfs_next_item(stripe_root, path);
+ if (ret)
+ goto out;
+ }
+
+ offset = logical - found_logical;
+
+ /*
+ * If we have a logically contiguous, but physically non-continuous
+ * range, we need to split the bio. Record the length after which we
+ * must split the bio.
+ */
+ if (end > found_end)
+ *length -= end - found_end;
+
+ num_stripes = btrfs_num_raid_stripes(btrfs_item_size(leaf, slot));
+ stripe_extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+ encoding = btrfs_stripe_extent_encoding(leaf, stripe_extent);
+
+ if (encoding != btrfs_bg_flags_to_raid_index(map_type)) {
+ ret = -EUCLEAN;
+ btrfs_handle_fs_error(fs_info, ret,
+ "on-disk stripe encoding %d doesn't match RAID index %d",
+ encoding,
+ btrfs_bg_flags_to_raid_index(map_type));
+ goto out;
+ }
+
+ for (int i = 0; i < num_stripes; i++) {
+ struct btrfs_raid_stride *stride = &stripe_extent->strides[i];
+ u64 devid = btrfs_raid_stride_devid(leaf, stride);
+ u64 physical = btrfs_raid_stride_physical(leaf, stride);
+
+ if (devid != stripe->dev->devid)
+ continue;
+
+ if ((map_type & BTRFS_BLOCK_GROUP_DUP) && stripe_index != i)
+ continue;
+
+ stripe->physical = physical + offset;
+
+ trace_btrfs_get_raid_extent_offset(fs_info, logical, *length,
+ stripe->physical, devid);
+
+ ret = 0;
+ goto free_path;
+ }
+
+ /* If we're here, we haven't found the requested devid in the stripe. */
+ ret = -ENOENT;
+out:
+ if (ret > 0)
+ ret = -ENOENT;
+ if (ret && ret != -EIO && !stripe->is_scrub) {
+ if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
+ btrfs_print_tree(leaf, 1);
+ btrfs_err(fs_info,
+ "cannot find raid-stripe for logical [%llu, %llu] devid %llu, profile %s",
+ logical, logical + *length, stripe->dev->devid,
+ btrfs_bg_type_to_raid_name(map_type));
+ }
+free_path:
+ btrfs_free_path(path);
+
+ return ret;
+}
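
When the looked-up range spills past the stripe extent that covers its start, the function trims *length so the caller can split the bio and re-map the tail. The arithmetic, restated as a tiny pure function for illustration only:

static u64 sketch_trim_length(u64 logical, u64 length,
			      u64 found_logical, u64 found_length)
{
	u64 end = logical + length;
	u64 found_end = found_logical + found_length;

	/* Keep only the part covered by the stripe extent we found. */
	if (end > found_end)
		length -= end - found_end;
	return length;
}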
diff --git a/fs/btrfs/raid-stripe-tree.h b/fs/btrfs/raid-stripe-tree.h
new file mode 100644
index 000000000000..cdb58b38fcb5
--- /dev/null
+++ b/fs/btrfs/raid-stripe-tree.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef BTRFS_RAID_STRIPE_TREE_H
+#define BTRFS_RAID_STRIPE_TREE_H
+
+#define BTRFS_RST_SUPP_BLOCK_GROUP_MASK (BTRFS_BLOCK_GROUP_DUP | \
+ BTRFS_BLOCK_GROUP_RAID1_MASK | \
+ BTRFS_BLOCK_GROUP_RAID0 | \
+ BTRFS_BLOCK_GROUP_RAID10)
+
+struct btrfs_io_context;
+struct btrfs_io_stripe;
+struct btrfs_ordered_extent;
+struct btrfs_trans_handle;
+
+int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length);
+int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 *length, u64 map_type,
+ u32 stripe_index, struct btrfs_io_stripe *stripe);
+int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_ordered_extent *ordered_extent);
+
+static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
+ u64 map_type)
+{
+ u64 type = map_type & BTRFS_BLOCK_GROUP_TYPE_MASK;
+ u64 profile = map_type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+ if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE))
+ return false;
+
+ if (type != BTRFS_BLOCK_GROUP_DATA)
+ return false;
+
+ if (profile & BTRFS_RST_SUPP_BLOCK_GROUP_MASK)
+ return true;
+
+ return false;
+}
+
+static inline int btrfs_num_raid_stripes(u32 item_size)
+{
+ return (item_size - offsetof(struct btrfs_stripe_extent, strides)) /
+ sizeof(struct btrfs_raid_stride);
+}
+
+#endif
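
Stripe extents are keyed by (objectid = logical start, type = BTRFS_RAID_STRIPE_KEY, offset = length), and the number of per-device strides is recovered purely from the item size. A sketch of the size arithmetic that btrfs_num_raid_stripes() inverts (not part of the patch):

static inline u32 sketch_stripe_item_size(int num_stripes)
{
	/* struct_size(stripe_extent, strides, num_stripes) analogue. */
	return offsetof(struct btrfs_stripe_extent, strides) +
	       num_stripes * sizeof(struct btrfs_raid_stride);
}

/* Round trip: btrfs_num_raid_stripes(sketch_stripe_item_size(n)) == n. */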
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 95d28497de7c..1f62976bee82 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -485,6 +485,9 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
ret = add_shared_data_ref(fs_info, offset, count,
key->objectid, key->offset);
break;
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ break;
default:
btrfs_err(fs_info, "invalid key type in iref");
ret = -EINVAL;
@@ -652,7 +655,7 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info,
}
/*
- * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
+ * Called when we modify a ref for a bytenr.
*
* This will add an action item to the given bytenr and do sanity checks to make
* sure we haven't messed something up. If we are making a new allocation and
@@ -681,10 +684,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
if (generic_ref->type == BTRFS_REF_METADATA) {
if (!parent)
- ref_root = generic_ref->tree_ref.owning_root;
+ ref_root = generic_ref->tree_ref.ref_root;
owner = generic_ref->tree_ref.level;
} else if (!parent) {
- ref_root = generic_ref->data_ref.owning_root;
+ ref_root = generic_ref->data_ref.ref_root;
owner = generic_ref->data_ref.ino;
offset = generic_ref->data_ref.offset;
}
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 65d2bd6910f2..f88b0c2ac3fe 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -25,12 +25,11 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
const u64 olen,
int no_time_update)
{
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
inode_inc_iversion(inode);
if (!no_time_update) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
/*
* We round up to the block size at eof when determining which
@@ -43,7 +42,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 9951a0caf5bb..f5d9e5f74a52 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -111,8 +111,8 @@ struct tree_block {
}; /* Use rb_simple_node for search/insert */
u64 owner;
struct btrfs_key key;
- unsigned int level:8;
- unsigned int key_ready:1;
+ u8 level;
+ bool key_ready;
};
#define MAX_EXTENTS 128
@@ -122,6 +122,13 @@ struct file_extent_cluster {
u64 end;
u64 boundary[MAX_EXTENTS];
unsigned int nr;
+ u64 owning_root;
+};
+
+/* Stages of data relocation. */
+enum reloc_stage {
+ MOVE_DATA_EXTENTS,
+ UPDATE_DATA_PTRS
};
struct reloc_control {
@@ -155,16 +162,12 @@ struct reloc_control {
u64 search_start;
u64 extents_found;
- unsigned int stage:8;
- unsigned int create_reloc_tree:1;
- unsigned int merge_reloc_tree:1;
- unsigned int found_file_extent:1;
+ enum reloc_stage stage;
+ bool create_reloc_tree;
+ bool merge_reloc_tree;
+ bool found_file_extent;
};
-/* stages of data relocation */
-#define MOVE_DATA_EXTENTS 0
-#define UPDATE_DATA_PTRS 1
-
static void mark_block_processed(struct reloc_control *rc,
struct btrfs_backref_node *node)
{
@@ -180,13 +183,6 @@ static void mark_block_processed(struct reloc_control *rc,
node->processed = 1;
}
-
-static void mapping_tree_init(struct mapping_tree *tree)
-{
- tree->rb_root = RB_ROOT;
- spin_lock_init(&tree->lock);
-}
-
/*
* walk up backref nodes until we reach the node that represents the tree root
*/
@@ -299,7 +295,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
return 1;
}
-static bool reloc_root_is_dead(struct btrfs_root *root)
+static bool reloc_root_is_dead(const struct btrfs_root *root)
{
/*
* Pair with set_bit/clear_bit in clean_dirty_subvols and
@@ -320,7 +316,7 @@ static bool reloc_root_is_dead(struct btrfs_root *root)
* from no reloc root. But btrfs_should_ignore_reloc_root() below is a
* special case.
*/
-static bool have_reloc_root(struct btrfs_root *root)
+static bool have_reloc_root(const struct btrfs_root *root)
{
if (reloc_root_is_dead(root))
return false;
@@ -329,31 +325,30 @@ static bool have_reloc_root(struct btrfs_root *root)
return true;
}
-int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
+bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
- return 0;
+ return false;
/* This root has been merged with its reloc tree, we can ignore it */
if (reloc_root_is_dead(root))
- return 1;
+ return true;
reloc_root = root->reloc_root;
if (!reloc_root)
- return 0;
+ return false;
if (btrfs_header_generation(reloc_root->commit_root) ==
root->fs_info->running_transaction->transid)
- return 0;
+ return false;
/*
- * if there is reloc tree and it was created in previous
- * transaction backref lookup can find the reloc tree,
- * so backref node for the fs tree root is useless for
- * relocation.
+ * If there is a reloc tree and it was created in a previous
+ * transaction, the backref lookup can find the reloc tree, so the
+ * backref node for the fs tree root is useless for relocation.
*/
- return 1;
+ return true;
}
/*
@@ -466,6 +461,7 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cached.
*/
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct btrfs_key *node_key,
int level, u64 bytenr)
{
@@ -499,8 +495,8 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
/* Breadth-first search to build backref cache */
do {
- ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
- cur);
+ ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
+ node_key, cur);
if (ret < 0) {
err = ret;
goto out;
@@ -546,7 +542,7 @@ out:
*/
static int clone_backref_node(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
- struct btrfs_root *src,
+ const struct btrfs_root *src,
struct btrfs_root *dest)
{
struct btrfs_root *reloc_root = src->reloc_root;
@@ -631,7 +627,7 @@ fail:
/*
* helper to add 'address of tree root -> reloc tree' mapping
*/
-static int __must_check __add_reloc_root(struct btrfs_root *root)
+static int __add_reloc_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
@@ -1158,7 +1154,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
key.offset -= btrfs_file_extent_offset(leaf, fi);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
- num_bytes, parent);
+ num_bytes, parent, root->root_key.objectid);
btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
key.objectid, key.offset,
root->root_key.objectid, false);
@@ -1169,7 +1165,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, parent);
+ num_bytes, parent, root->root_key.objectid);
btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
key.objectid, key.offset,
root->root_key.objectid, false);
@@ -1180,15 +1176,15 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
}
if (dirty)
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (inode)
btrfs_add_delayed_iput(BTRFS_I(inode));
return ret;
}
-static noinline_for_stack
-int memcmp_node_keys(struct extent_buffer *eb, int slot,
- struct btrfs_path *path, int level)
+static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
+ int slot, const struct btrfs_path *path,
+ int level)
{
struct btrfs_disk_key key1;
struct btrfs_disk_key key2;
@@ -1373,16 +1369,17 @@ again:
*/
btrfs_set_node_blockptr(parent, slot, new_bytenr);
btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
- btrfs_mark_buffer_dirty(parent);
+ btrfs_mark_buffer_dirty(trans, parent);
btrfs_set_node_blockptr(path->nodes[level],
path->slots[level], old_bytenr);
btrfs_set_node_ptr_generation(path->nodes[level],
path->slots[level], old_ptr_gen);
- btrfs_mark_buffer_dirty(path->nodes[level]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[level]);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
- blocksize, path->nodes[level]->start);
+ blocksize, path->nodes[level]->start,
+ src->root_key.objectid);
btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
@@ -1391,7 +1388,7 @@ again:
break;
}
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
- blocksize, 0);
+ blocksize, 0, dest->root_key.objectid);
btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
true);
ret = btrfs_inc_extent_ref(trans, &ref);
@@ -1400,8 +1397,9 @@ again:
break;
}
+ /* We don't know the real owning_root, use 0. */
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
- blocksize, path->nodes[level]->start);
+ blocksize, path->nodes[level]->start, 0);
btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
0, true);
ret = btrfs_free_extent(trans, &ref);
@@ -1410,8 +1408,9 @@ again:
break;
}
+ /* We don't know the real owning_root, use 0. */
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
- blocksize, 0);
+ blocksize, 0, 0);
btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
0, true);
ret = btrfs_free_extent(trans, &ref);
@@ -1517,8 +1516,8 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
* [min_key, max_key)
*/
static int invalidate_extent_cache(struct btrfs_root *root,
- struct btrfs_key *min_key,
- struct btrfs_key *max_key)
+ const struct btrfs_key *min_key,
+ const struct btrfs_key *max_key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = NULL;
@@ -1896,7 +1895,7 @@ again:
}
}
- rc->merge_reloc_tree = 1;
+ rc->merge_reloc_tree = true;
while (!list_empty(&rc->reloc_roots)) {
reloc_root = list_entry(rc->reloc_roots.next,
@@ -2516,11 +2515,12 @@ static int do_relocation(struct btrfs_trans_handle *trans,
node->eb->start);
btrfs_set_node_ptr_generation(upper->eb, slot,
trans->transid);
- btrfs_mark_buffer_dirty(upper->eb);
+ btrfs_mark_buffer_dirty(trans, upper->eb);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
node->eb->start, blocksize,
- upper->eb->start);
+ upper->eb->start,
+ btrfs_header_owner(upper->eb));
btrfs_init_tree_ref(&ref, node->level,
btrfs_header_owner(upper->eb),
root->root_key.objectid, false);
@@ -2632,7 +2632,7 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
u32 blocksize = rc->extent_root->fs_info->nodesize;
if (test_range_bit(&rc->processed_blocks, bytenr,
- bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
+ bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
return 1;
return 0;
}
@@ -2659,7 +2659,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
else
btrfs_node_key_to_cpu(eb, &block->key, 0);
free_extent_buffer(eb);
- block->key_ready = 1;
+ block->key_ready = true;
return 0;
}
@@ -2803,7 +2803,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Do tree relocation */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
- node = build_backref_tree(rc, &block->key,
+ node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
err = PTR_ERR(node);
@@ -2829,7 +2829,7 @@ out_free_blocks:
static noinline_for_stack int prealloc_file_extent_cluster(
struct btrfs_inode *inode,
- struct file_extent_cluster *cluster)
+ const struct file_extent_cluster *cluster)
{
u64 alloc_hint = 0;
u64 start;
@@ -2964,7 +2964,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inod
/*
* Allow error injection to test balance/relocation cancellation
*/
-noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
+noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
{
return atomic_read(&fs_info->balance_cancel_req) ||
atomic_read(&fs_info->reloc_cancel_req) ||
@@ -2972,7 +2972,7 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
-static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
+static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
int cluster_nr)
{
/* Last extent, use cluster end directly */
@@ -2984,7 +2984,7 @@ static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
}
static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
- struct file_extent_cluster *cluster,
+ const struct file_extent_cluster *cluster,
int *cluster_nr, unsigned long page_index)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -3119,7 +3119,7 @@ release_page:
}
static int relocate_file_extent_cluster(struct inode *inode,
- struct file_extent_cluster *cluster)
+ const struct file_extent_cluster *cluster)
{
u64 offset = BTRFS_I(inode)->index_cnt;
unsigned long index;
@@ -3157,11 +3157,12 @@ out:
return ret;
}
-static noinline_for_stack
-int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
- struct file_extent_cluster *cluster)
+static noinline_for_stack int relocate_data_extent(struct inode *inode,
+ const struct btrfs_key *extent_key,
+ struct file_extent_cluster *cluster)
{
int ret;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
ret = relocate_file_extent_cluster(inode, cluster);
@@ -3170,8 +3171,38 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
cluster->nr = 0;
}
- if (!cluster->nr)
+ /*
+ * Under simple quotas, we set root->relocation_src_root when we find
+ * the extent. If adjacent extents have different owners, we can't merge
+ * them while relocating. Handle this by storing the owning root that
+ * started a cluster and, if we see an extent from a different root,
+ * breaking cluster formation (just like the above case of non-adjacent
+ * extents).
+ *
+ * Without simple quotas, relocation_src_root is always 0, so we should
+ * never see a mismatch, and it should have no effect on relocation
+ * clusters.
+ */
+ if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
+ u64 tmp = root->relocation_src_root;
+
+ /*
+ * root->relocation_src_root is the state that actually affects
+ * the preallocation we do here, so set it to the root owning
+ * the cluster we need to relocate.
+ */
+ root->relocation_src_root = cluster->owning_root;
+ ret = relocate_file_extent_cluster(inode, cluster);
+ if (ret)
+ return ret;
+ cluster->nr = 0;
+ /* And reset it back for the current extent's owning root. */
+ root->relocation_src_root = tmp;
+ }
+
+ if (!cluster->nr) {
cluster->start = extent_key->objectid;
+ cluster->owning_root = root->relocation_src_root;
+ }
else
BUG_ON(cluster->nr >= MAX_EXTENTS);
cluster->end = extent_key->objectid + extent_key->offset - 1;
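
Taken together, a cluster is now flushed on either of two conditions: the next extent is not physically adjacent, or it is owned by a different root under simple quotas. Condensed into one hypothetical predicate over the fields shown above:

static bool sketch_must_flush_cluster(const struct file_extent_cluster *cluster,
				      u64 next_start, u64 next_owner)
{
	if (!cluster->nr)
		return false;	/* nothing buffered yet */
	return next_start != cluster->end + 1 ||
	       next_owner != cluster->owning_root;
}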
@@ -3192,7 +3223,7 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
* the major work is getting the generation and level of the block
*/
static int add_tree_block(struct reloc_control *rc,
- struct btrfs_key *extent_key,
+ const struct btrfs_key *extent_key,
struct btrfs_path *path,
struct rb_root *blocks)
{
@@ -3277,7 +3308,7 @@ static int add_tree_block(struct reloc_control *rc,
block->key.objectid = rc->extent_root->fs_info->nodesize;
block->key.offset = generation;
block->level = level;
- block->key_ready = 0;
+ block->key_ready = false;
block->owner = owner;
rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
@@ -3443,11 +3474,10 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
/*
* helper to find all tree blocks that reference a given data extent
*/
-static noinline_for_stack
-int add_data_references(struct reloc_control *rc,
- struct btrfs_key *extent_key,
- struct btrfs_path *path,
- struct rb_root *blocks)
+static noinline_for_stack int add_data_references(struct reloc_control *rc,
+ const struct btrfs_key *extent_key,
+ struct btrfs_path *path,
+ struct rb_root *blocks)
{
struct btrfs_backref_walk_ctx ctx = { 0 };
struct ulist_iterator leaf_uiter;
@@ -3621,7 +3651,7 @@ int prepare_to_relocate(struct reloc_control *rc)
if (ret)
return ret;
- rc->create_reloc_tree = 1;
+ rc->create_reloc_tree = true;
set_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root);
@@ -3701,6 +3731,21 @@ restart:
struct btrfs_extent_item);
flags = btrfs_extent_flags(path->nodes[0], ei);
+ /*
+ * If we are relocating a simple quota owned extent item, we
+ * need to note the owner on the reloc data root so that when
+ * we allocate the replacement item, we can attribute it to the
+ * correct eventual owner (rather than the reloc data root).
+ */
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
+ struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
+ u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
+ path->nodes[0],
+ path->slots[0]);
+
+ root->relocation_src_root = owning_root_id;
+ }
+
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
ret = add_tree_block(rc, &key, path, &blocks);
} else if (rc->stage == UPDATE_DATA_PTRS &&
@@ -3733,7 +3778,7 @@ restart:
if (rc->stage == MOVE_DATA_EXTENTS &&
(flags & BTRFS_EXTENT_FLAG_DATA)) {
- rc->found_file_extent = 1;
+ rc->found_file_extent = true;
ret = relocate_data_extent(rc->data_inode,
&key, &rc->cluster);
if (ret < 0) {
@@ -3770,7 +3815,7 @@ restart:
err = ret;
}
- rc->create_reloc_tree = 0;
+ rc->create_reloc_tree = false;
set_reloc_control(rc);
btrfs_backref_release_cache(&rc->backref_cache);
@@ -3788,7 +3833,7 @@ restart:
merge_reloc_roots(rc);
- rc->merge_reloc_tree = 0;
+ rc->merge_reloc_tree = false;
unset_reloc_control(rc);
btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
@@ -3834,7 +3879,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@@ -3873,9 +3918,9 @@ out:
* helper to create inode for data relocation.
* the inode is in data relocation tree and its link count is 0
*/
-static noinline_for_stack
-struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
- struct btrfs_block_group *group)
+static noinline_for_stack struct inode *create_reloc_inode(
+ struct btrfs_fs_info *fs_info,
+ const struct btrfs_block_group *group)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
@@ -3970,8 +4015,9 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&rc->reloc_roots);
INIT_LIST_HEAD(&rc->dirty_subvol_roots);
- btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
- mapping_tree_init(&rc->reloc_root_tree);
+ btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
+ rc->reloc_root_tree.rb_root = RB_ROOT;
+ spin_lock_init(&rc->reloc_root_tree.lock);
extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
return rc;
}
@@ -4003,7 +4049,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
block_group->start, buf);
}
-static const char *stage_to_string(int stage)
+static const char *stage_to_string(enum reloc_stage stage)
{
if (stage == MOVE_DATA_EXTENTS)
return "move data extents";
@@ -4119,7 +4165,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
WARN_ON(ret && ret != -EAGAIN);
while (1) {
- int finishes_stage;
+ enum reloc_stage finishes_stage;
mutex_lock(&fs_info->cleaner_mutex);
ret = relocate_block_group(rc);
@@ -4302,7 +4348,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
goto out_unset;
}
- rc->merge_reloc_tree = 1;
+ rc->merge_reloc_tree = true;
while (!list_empty(&reloc_roots)) {
reloc_root = list_entry(reloc_roots.next,
@@ -4421,7 +4467,8 @@ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
}
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf,
+ struct btrfs_root *root,
+ const struct extent_buffer *buf,
struct extent_buffer *cow)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -4560,7 +4607,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
*
* Return U64_MAX if no running relocation.
*/
-u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info)
+u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
{
u64 logical = U64_MAX;
diff --git a/fs/btrfs/relocation.h b/fs/btrfs/relocation.h
index 77d69f6ae967..5fb60f2deb53 100644
--- a/fs/btrfs/relocation.h
+++ b/fs/btrfs/relocation.h
@@ -10,15 +10,16 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct extent_buffer *buf,
+ struct btrfs_root *root,
+ const struct extent_buffer *buf,
struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
-int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
+int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info);
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr);
-int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
-u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info);
+bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root);
+u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info);
#endif
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 859874579456..603ad1459368 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -51,7 +51,8 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
}
/*
- * btrfs_find_root - lookup the root by the key.
+ * Lookup the root by the key.
+ *
* root: the root of the root tree
* search_key: the key to search
* path: the path we search
@@ -191,7 +192,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
write_extent_buffer(l, item, ptr, sizeof(*item));
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
return ret;
@@ -438,7 +439,7 @@ again:
btrfs_set_root_ref_name_len(leaf, ref, name->len);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(leaf, name->name, ptr, name->len);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(path);
@@ -485,7 +486,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
}
/*
- * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
+ * Reserve space for subvolume operation.
+ *
* root: the root of the parent directory
* rsv: block reservation
* items: the number of items that we need do reservation
@@ -508,7 +510,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
- if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+ if (btrfs_qgroup_enabled(fs_info)) {
/* One for parent inode, two for dir entries */
qgroup_num_bytes = 3 * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta_prealloc(root,
diff --git a/fs/btrfs/root-tree.h b/fs/btrfs/root-tree.h
index cbbaca32126e..8b2c3859e464 100644
--- a/fs/btrfs/root-tree.h
+++ b/fs/btrfs/root-tree.h
@@ -3,6 +3,8 @@
#ifndef BTRFS_ROOT_TREE_H
#define BTRFS_ROOT_TREE_H
+struct fscrypt_str;
+
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems, bool use_global_rsv);
@@ -18,10 +20,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, const struct btrfs_key *key
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_root_item *item);
-int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_key *key,
- struct btrfs_root_item *item);
+int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_root_item *item);
int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
struct btrfs_path *path, struct btrfs_root_item *root_item,
struct btrfs_key *root_key);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b877203f1dc5..9ce5be21b036 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,7 +16,6 @@
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
-#include "check-integrity.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
@@ -24,6 +23,7 @@
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"
+#include "raid-stripe-tree.h"
/*
* This is only the first step towards a full-featured scrub. It reads all
@@ -897,7 +897,7 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
ASSERT(stripe->mirror_num >= 1);
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
stripe->logical, &mapped_len, &bioc,
- NULL, NULL, 1);
+ NULL, NULL);
/*
* If we failed, dev will be NULL, and later detailed reports
* will just be skipped.
@@ -1635,6 +1635,71 @@ static void scrub_reset_stripe(struct scrub_stripe *stripe)
}
}
+static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
+ struct scrub_stripe *stripe)
+{
+ struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
+ struct btrfs_bio *bbio = NULL;
+ u64 stripe_len = BTRFS_STRIPE_LEN;
+ int mirror = stripe->mirror_num;
+ int i;
+
+ atomic_inc(&stripe->pending_io);
+
+ for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
+ struct page *page = scrub_stripe_get_page(stripe, i);
+ unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
+
+ /* The current sector cannot be merged, submit the bio. */
+ if (bbio &&
+ ((i > 0 &&
+ !test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
+ bbio->bio.bi_iter.bi_size >= stripe_len)) {
+ ASSERT(bbio->bio.bi_iter.bi_size);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_bio(bbio, mirror);
+ bbio = NULL;
+ }
+
+ if (!bbio) {
+ struct btrfs_io_stripe io_stripe = {};
+ struct btrfs_io_context *bioc = NULL;
+ const u64 logical = stripe->logical +
+ (i << fs_info->sectorsize_bits);
+ int err;
+
+ bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
+ fs_info, scrub_read_endio, stripe);
+ bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
+
+ io_stripe.is_scrub = true;
+ err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
+ &stripe_len, &bioc, &io_stripe,
+ &mirror);
+ btrfs_put_bioc(bioc);
+ if (err) {
+ btrfs_bio_end_io(bbio,
+ errno_to_blk_status(err));
+ return;
+ }
+ }
+
+ __bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
+ }
+
+ if (bbio) {
+ ASSERT(bbio->bio.bi_iter.bi_size);
+ atomic_inc(&stripe->pending_io);
+ btrfs_submit_bio(bbio, mirror);
+ }
+
+ if (atomic_dec_and_test(&stripe->pending_io)) {
+ wake_up(&stripe->io_wait);
+ INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
+ queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
+ }
+}
+
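The added helper leans on a bias reference: pending_io is bumped once before any bio is built, so the completion count cannot reach zero while submission is still in progress, and the submitter's own decrement at the end queues the repair worker if every bio already completed. A minimal, self-contained sketch of that pattern (all names here are illustrative, not btrfs API):

#include <stdatomic.h>
#include <stdbool.h>

struct submit_ctx {
	atomic_int pending;	/* in-flight units plus one bias reference */
};

/* Called once per completion; the last dropper returns true. */
static bool put_pending(struct submit_ctx *ctx)
{
	return atomic_fetch_sub(&ctx->pending, 1) == 1;
}

static void submit_units(struct submit_ctx *ctx, int nr)
{
	atomic_fetch_add(&ctx->pending, 1);	/* bias: held by the submitter */
	for (int i = 0; i < nr; i++) {
		atomic_fetch_add(&ctx->pending, 1);
		/* ...hand unit i to the asynchronous layer here... */
	}
	if (put_pending(ctx)) {
		/* Every unit completed before we dropped the bias, so the
		 * submitter must trigger the completion step itself. */
	}
}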
static void scrub_submit_initial_read(struct scrub_ctx *sctx,
struct scrub_stripe *stripe)
{
@@ -1646,6 +1711,11 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
ASSERT(stripe->mirror_num > 0);
ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
+ if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
+ scrub_submit_extent_sector_read(sctx, stripe);
+ return;
+ }
+
bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
scrub_read_endio, stripe);
@@ -1952,7 +2022,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
- &length, &bioc, NULL, NULL, 1);
+ &length, &bioc, NULL, NULL);
if (ret < 0) {
btrfs_put_bioc(bioc);
btrfs_bio_counter_dec(fs_info);
@@ -2717,7 +2787,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
if (scrub_dev->fs_devices != fs_info->fs_devices)
gen = scrub_dev->generation;
else
- gen = fs_info->last_trans_committed;
+ gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 3a566150c531..3b929f0e8f04 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -796,7 +796,7 @@ static int send_cmd(struct send_ctx *sctx)
put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
put_unaligned_le32(0, &hdr->crc);
- crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
put_unaligned_le32(crc, &hdr->crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
@@ -5669,8 +5669,8 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
hdr = (struct btrfs_cmd_header *)sctx->send_buf;
hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
hdr->crc = 0;
- crc = btrfs_crc32c(0, sctx->send_buf, sctx->send_size);
- crc = btrfs_crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
+ crc = crc32c(0, sctx->send_buf, sctx->send_size);
+ crc = crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
hdr->crc = cpu_to_le32(crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
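Both send hunks follow the same checksum discipline: zero the crc field, run crc32c over the full command (header plus payload, seed 0), then store the result little-endian; send_encoded_extent simply feeds the two buffer regions to crc32c() incrementally. A condensed sketch, assuming the kernel's crc32c() from linux/crc32c.h and the send-stream header layout:

/* Kernel context assumed: linux/types.h and linux/crc32c.h. */
struct btrfs_cmd_header {
	__le32 len;	/* payload length, excluding this header */
	__le16 cmd;
	__le32 crc;	/* crc32c of the whole command, crc field zeroed */
} __attribute__((packed));

static void sign_send_command(struct btrfs_cmd_header *hdr, size_t total)
{
	u32 crc;

	hdr->crc = 0;			/* must be zero while hashing */
	crc = crc32c(0, (const u8 *)hdr, total);
	hdr->crc = cpu_to_le32(crc);
}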
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index d7e8cd4f140c..571bb13587d5 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -345,8 +345,10 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_space_info *data_sinfo;
u64 profile;
u64 avail;
+ u64 data_chunk_size;
int factor;
if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
@@ -364,6 +366,36 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
*/
factor = btrfs_bg_type_to_factor(profile);
avail = div_u64(avail, factor);
+ if (avail == 0)
+ return 0;
+
+ /*
+ * Calculate the data_chunk_size; space_info->chunk_size is the
+ * "optimal" chunk size based on the fs size. However, when we actually
+ * allocate the chunk we will strip this down further, making it no more
+ * than 10% of the disk or 1G, whichever is smaller.
+ */
+ data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
+ data_chunk_size = min(data_sinfo->chunk_size,
+ mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
+ data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
+
+ /*
+ * Data allocations immediately use block groups as part of the
+ * reservation (since we assume data reservations will equal actual
+ * usage), so we could potentially overcommit and then immediately have
+ * that available space consumed by a data allocation, which puts us in
+ * a bind when we get close to filling the file system.
+ *
+ * To handle this simply remove the data_chunk_size from the available
+ * space. If we are relatively empty this won't affect our ability to
+ * overcommit much, and if we're very close to full it'll keep us from
+ * getting into a position where we've given ourselves very little
+ * metadata wiggle room.
+ */
+ if (avail <= data_chunk_size)
+ return 0;
+ avail -= data_chunk_size;
/*
* If we aren't flushing all things, let us overcommit up to
@@ -556,18 +588,6 @@ static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
return nr;
}
-static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info,
- u64 to_reclaim)
-{
- const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1);
- u64 nr;
-
- nr = div64_u64(to_reclaim, bytes);
- if (!nr)
- nr = 1;
- return nr;
-}
-
#define EXTENT_SIZE_PER_ITEM SZ_256K
/*
@@ -749,10 +769,9 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
}
if (state == FLUSH_DELAYED_REFS_NR)
- nr = calc_delayed_refs_nr(fs_info, num_bytes);
+ btrfs_run_delayed_refs(trans, num_bytes);
else
- nr = 0;
- btrfs_run_delayed_refs(trans, nr);
+ btrfs_run_delayed_refs(trans, 0);
btrfs_end_transaction(trans);
break;
case ALLOC_CHUNK:
@@ -978,7 +997,8 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
}
/*
- * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
+ * We've exhausted our flushing, start failing tickets.
+ *
* @fs_info - fs_info for this fs
* @space_info - the space info we were flushing
*
@@ -1742,7 +1762,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* Try to reserve metadata bytes from the block_rsv's space.
*
* @fs_info: the filesystem
- * @block_rsv: block_rsv we're allocating for
+ * @space_info: the space_info we're allocating for
* @orig_bytes: number of bytes we want
* @flush: whether or not we can flush to make our reservation
*
@@ -1754,21 +1774,19 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* space already.
*/
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *block_rsv,
+ struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
int ret;
- ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
+ ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
if (ret == -ENOSPC) {
trace_btrfs_space_reservation(fs_info, "space_info:enospc",
- block_rsv->space_info->flags,
- orig_bytes, 1);
+ space_info->flags, orig_bytes, 1);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
- btrfs_dump_space_info(fs_info, block_rsv->space_info,
- orig_bytes, 0);
+ btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
}
return ret;
}
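The overcommit guard added to calc_available_free_space() reduces to a three-way clamp plus a saturating subtraction. A condensed form of the same math (min_t(), SZ_1G and mult_perc() are the kernel helpers already used above):

static u64 usable_for_overcommit(u64 avail, u64 optimal_chunk_size,
				 u64 total_rw_bytes)
{
	/* A data chunk is at most the "optimal" size, 10% of the disk,
	 * or 1 GiB, whichever is smallest. */
	u64 data_chunk_size = min(optimal_chunk_size,
				  mult_perc(total_rw_bytes, 10));

	data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);

	/* Keep one data chunk's worth of space out of the overcommit math. */
	return avail > data_chunk_size ? avail - data_chunk_size : 0;
}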
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 0bb9d14e60a8..92c595fed1b0 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -3,6 +3,7 @@
#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H
+#include <trace/events/btrfs.h>
#include "volumes.h"
/*
@@ -212,7 +213,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *block_rsv,
+ struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush);
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 1a093ec0f7e3..6ecf78d09694 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -26,6 +26,7 @@
#include <linux/ratelimit.h>
#include <linux/crc32c.h>
#include <linux/btrfs.h>
+#include <linux/security.h>
#include "messages.h"
#include "delayed-inode.h"
#include "ctree.h"
@@ -129,9 +130,6 @@ enum {
Opt_inode_cache, Opt_noinode_cache,
/* Debugging options */
- Opt_check_integrity,
- Opt_check_integrity_including_extent_data,
- Opt_check_integrity_print_mask,
Opt_enospc_debug, Opt_noenospc_debug,
#ifdef CONFIG_BTRFS_DEBUG
Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
@@ -200,9 +198,6 @@ static const match_table_t tokens = {
{Opt_recovery, "recovery"},
/* Debugging options */
- {Opt_check_integrity, "check_int"},
- {Opt_check_integrity_including_extent_data, "check_int_data"},
- {Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
{Opt_enospc_debug, "enospc_debug"},
{Opt_noenospc_debug, "noenospc_debug"},
#ifdef CONFIG_BTRFS_DEBUG
@@ -707,44 +702,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_skip_balance:
btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
break;
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- case Opt_check_integrity_including_extent_data:
- btrfs_warn(info,
- "integrity checker is deprecated and will be removed in 6.7");
- btrfs_info(info,
- "enabling check integrity including extent data");
- btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
- btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
- break;
- case Opt_check_integrity:
- btrfs_warn(info,
- "integrity checker is deprecated and will be removed in 6.7");
- btrfs_info(info, "enabling check integrity");
- btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
- break;
- case Opt_check_integrity_print_mask:
- ret = match_int(&args[0], &intarg);
- if (ret) {
- btrfs_err(info,
- "unrecognized check_integrity_print_mask value %s",
- args[0].from);
- goto out;
- }
- info->check_integrity_print_mask = intarg;
- btrfs_warn(info,
- "integrity checker is deprecated and will be removed in 6.7");
- btrfs_info(info, "check_integrity_print_mask 0x%x",
- info->check_integrity_print_mask);
- break;
-#else
- case Opt_check_integrity_including_extent_data:
- case Opt_check_integrity:
- case Opt_check_integrity_print_mask:
- btrfs_err(info,
- "support for check_integrity* not compiled in!");
- ret = -EINVAL;
- goto out;
-#endif
case Opt_fatal_errors:
if (strcmp(args[0].from, "panic") == 0) {
btrfs_set_opt(info->mount_opt,
@@ -889,7 +846,7 @@ static int btrfs_parse_device_options(const char *options, blk_mode_t flags)
error = -ENOMEM;
goto out;
}
- device = btrfs_scan_one_device(device_name, flags);
+ device = btrfs_scan_one_device(device_name, flags, false);
kfree(device_name);
if (IS_ERR(device)) {
error = PTR_ERR(device);
@@ -1305,15 +1262,6 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",autodefrag");
if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(info, CHECK_INTEGRITY_DATA))
- seq_puts(seq, ",check_int_data");
- else if (btrfs_test_opt(info, CHECK_INTEGRITY))
- seq_puts(seq, ",check_int");
- if (info->check_integrity_print_mask)
- seq_printf(seq, ",check_int_print_mask=%d",
- info->check_integrity_print_mask);
-#endif
if (info->metadata_ratio)
seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
@@ -1484,7 +1432,12 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
goto error_fs_info;
}
- device = btrfs_scan_one_device(device_name, mode);
+ /*
+ * With 'true' passed to btrfs_scan_one_device() (mount time) we expect
+ * either a valid device or an error.
+ */
+ device = btrfs_scan_one_device(device_name, mode, true);
+ ASSERT(device != NULL);
if (IS_ERR(device)) {
mutex_unlock(&uuid_mutex);
error = PTR_ERR(device);
@@ -2196,7 +2149,11 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
mutex_lock(&uuid_mutex);
- device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ);
+ /*
+ * Scanning outside of mount can return NULL, which would turn
+ * into a 0 error code.
+ */
+ device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
ret = PTR_ERR_OR_ZERO(device);
mutex_unlock(&uuid_mutex);
break;
@@ -2210,8 +2167,12 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
break;
case BTRFS_IOC_DEVICES_READY:
mutex_lock(&uuid_mutex);
- device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ);
- if (IS_ERR(device)) {
+ /*
+ * Scanning outside of mount can return NULL, which would turn
+ * into a 0 error code.
+ */
+ device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
+ if (IS_ERR_OR_NULL(device)) {
mutex_unlock(&uuid_mutex);
ret = PTR_ERR(device);
break;
@@ -2256,6 +2217,7 @@ static int check_dev_super(struct btrfs_device *dev)
{
struct btrfs_fs_info *fs_info = dev->fs_info;
struct btrfs_super_block *sb;
+ u64 last_trans;
u16 csum_type;
int ret = 0;
@@ -2291,10 +2253,10 @@ static int check_dev_super(struct btrfs_device *dev)
if (ret < 0)
goto out;
- if (btrfs_super_generation(sb) != fs_info->last_trans_committed) {
+ last_trans = btrfs_get_last_trans_committed(fs_info);
+ if (btrfs_super_generation(sb) != last_trans) {
btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
- btrfs_super_generation(sb),
- fs_info->last_trans_committed);
+ btrfs_super_generation(sb), last_trans);
ret = -EUCLEAN;
goto out;
}
@@ -2404,9 +2366,6 @@ static int __init btrfs_print_mod_info(void)
#ifdef CONFIG_BTRFS_ASSERT
", assert=on"
#endif
-#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- ", integrity-checker=on"
-#endif
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
", ref-verify=on"
#endif
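With the new bool argument, btrfs_scan_one_device() is effectively tri-state outside of mount: ERR_PTR() on failure, NULL when no device was registered, or a valid device. A hedged sketch of the caller-side check the comments above describe (scan() and the -ENOENT mapping are illustrative only; the real callers keep PTR_ERR()):

struct btrfs_device *device = scan(name);	/* ERR_PTR, NULL or valid */

if (IS_ERR_OR_NULL(device)) {
	/* PTR_ERR(NULL) evaluates to 0, so a plain IS_ERR() check would
	 * silently turn "nothing found" into success. */
	return device ? PTR_ERR(device) : -ENOENT;
}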
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index b1d1ac25237b..e6b51fb3ddc1 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -291,12 +291,15 @@ BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
BTRFS_FEAT_ATTR_COMPAT_RO(block_group_tree, BLOCK_GROUP_TREE);
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
+BTRFS_FEAT_ATTR_INCOMPAT(simple_quota, SIMPLE_QUOTA);
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
#ifdef CONFIG_BTRFS_DEBUG
/* Remove once support for extent tree v2 is feature complete */
BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
+/* Remove once support for raid stripe tree is feature complete. */
+BTRFS_FEAT_ATTR_INCOMPAT(raid_stripe_tree, RAID_STRIPE_TREE);
#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
@@ -322,11 +325,13 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(free_space_tree),
BTRFS_FEAT_ATTR_PTR(raid1c34),
BTRFS_FEAT_ATTR_PTR(block_group_tree),
+ BTRFS_FEAT_ATTR_PTR(simple_quota),
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_PTR(zoned),
#endif
#ifdef CONFIG_BTRFS_DEBUG
BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
+ BTRFS_FEAT_ATTR_PTR(raid_stripe_tree),
#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_PTR(verity),
@@ -420,6 +425,13 @@ static ssize_t acl_show(struct kobject *kobj, struct kobj_attribute *a, char *bu
}
BTRFS_ATTR(static_feature, acl, acl_show);
+static ssize_t temp_fsid_supported_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ return sysfs_emit(buf, "0\n");
+}
+BTRFS_ATTR(static_feature, temp_fsid, temp_fsid_supported_show);
+
/*
* Features which only depend on kernel version.
*
@@ -433,6 +445,7 @@ static struct attribute *btrfs_supported_static_feature_attrs[] = {
BTRFS_ATTR_PTR(static_feature, send_stream_version),
BTRFS_ATTR_PTR(static_feature, supported_rescue_options),
BTRFS_ATTR_PTR(static_feature, supported_sectorsizes),
+ BTRFS_ATTR_PTR(static_feature, temp_fsid),
NULL
};
@@ -1196,10 +1209,19 @@ static ssize_t btrfs_generation_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
- return sysfs_emit(buf, "%llu\n", fs_info->generation);
+ return sysfs_emit(buf, "%llu\n", btrfs_get_fs_generation(fs_info));
}
BTRFS_ATTR(, generation, btrfs_generation_show);
+static ssize_t btrfs_temp_fsid_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+
+ return sysfs_emit(buf, "%d\n", fs_info->fs_devices->temp_fsid);
+}
+BTRFS_ATTR(, temp_fsid, btrfs_temp_fsid_show);
+
static const char * const btrfs_read_policy_name[] = { "pid" };
static ssize_t btrfs_read_policy_show(struct kobject *kobj,
@@ -1302,6 +1324,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, read_policy),
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
BTRFS_ATTR_PTR(, commit_stats),
+ BTRFS_ATTR_PTR(, temp_fsid),
NULL,
};
@@ -2086,6 +2109,33 @@ static ssize_t qgroup_enabled_show(struct kobject *qgroups_kobj,
}
BTRFS_ATTR(qgroups, enabled, qgroup_enabled_show);
+static ssize_t qgroup_mode_show(struct kobject *qgroups_kobj,
+ struct kobj_attribute *a,
+ char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
+ ssize_t ret = 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ ASSERT(btrfs_qgroup_enabled(fs_info));
+ switch (btrfs_qgroup_mode(fs_info)) {
+ case BTRFS_QGROUP_MODE_FULL:
+ ret = sysfs_emit(buf, "qgroup\n");
+ break;
+ case BTRFS_QGROUP_MODE_SIMPLE:
+ ret = sysfs_emit(buf, "squota\n");
+ break;
+ default:
+ btrfs_warn(fs_info, "unexpected qgroup mode %d",
+ btrfs_qgroup_mode(fs_info));
+ break;
+ }
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+BTRFS_ATTR(qgroups, mode, qgroup_mode_show);
+
static ssize_t qgroup_inconsistent_show(struct kobject *qgroups_kobj,
struct kobj_attribute *a,
char *buf)
@@ -2148,6 +2198,7 @@ static struct attribute *qgroups_attrs[] = {
BTRFS_ATTR_PTR(qgroups, enabled),
BTRFS_ATTR_PTR(qgroups, inconsistent),
BTRFS_ATTR_PTR(qgroups, drop_subtree_threshold),
+ BTRFS_ATTR_PTR(qgroups, mode),
NULL
};
ATTRIBUTE_GROUPS(qgroups);
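The new qgroups/mode attribute is a read-only file in the per-filesystem sysfs directory. A small userspace sketch for reading it; the <FSID> path component stands for the filesystem UUID and must be substituted for a real mount:

#include <stdio.h>

int main(void)
{
	char mode[16];
	FILE *f = fopen("/sys/fs/btrfs/<FSID>/qgroups/mode", "r");

	if (!f)
		return 1;
	if (fgets(mode, sizeof(mode), f))
		printf("qgroup mode: %s", mode);	/* "qgroup" or "squota" */
	fclose(f);
	return 0;
}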
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index 5ef0b90e25c3..6a43a64ba55a 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
key.type = BTRFS_EXTENT_CSUM_KEY;
key.offset = 0;
- btrfs_setup_item_for_insert(root, path, &key, value_len);
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
value_len);
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 05b03f5eab83..492d69d2fa73 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = start;
- btrfs_setup_item_for_insert(root, &path, &key, value_len);
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, 1);
btrfs_set_file_extent_type(leaf, fi, type);
@@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- btrfs_setup_item_for_insert(root, &path, &key, value_len);
+ /*
+ * Passing a NULL trans handle is fine here, we have a dummy root eb
+ * and the tree is a single node (level 0).
+ */
+ btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
}
/*
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index c780d3729463..6e63816dddcb 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -386,7 +386,7 @@ loop:
IO_TREE_TRANS_DIRTY_PAGES);
extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
IO_TREE_FS_PINNED_EXTENTS);
- fs_info->generation++;
+ btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
cur_trans->transid = fs_info->generation;
fs_info->running_transaction = cur_trans;
cur_trans->aborted = 0;
@@ -561,6 +561,69 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root)
return true;
}
+static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
+ enum btrfs_reserve_flush_enum flush,
+ u64 num_bytes,
+ u64 *delayed_refs_bytes)
+{
+ struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
+ u64 extra_delayed_refs_bytes = 0;
+ u64 bytes;
+ int ret;
+
+ /*
+ * If there's a gap between the size of the delayed refs reserve and
+ * its reserved space, then some tasks have added delayed refs or
+ * otherwise bumped its size (due to block group creation or removal, or
+ * a block group item update). Also try to allocate that gap to prevent
+ * using (and possibly abusing) the global reserve when committing the
+ * transaction.
+ */
+ if (flush == BTRFS_RESERVE_FLUSH_ALL &&
+ !btrfs_block_rsv_full(delayed_refs_rsv)) {
+ spin_lock(&delayed_refs_rsv->lock);
+ if (delayed_refs_rsv->size > delayed_refs_rsv->reserved)
+ extra_delayed_refs_bytes = delayed_refs_rsv->size -
+ delayed_refs_rsv->reserved;
+ spin_unlock(&delayed_refs_rsv->lock);
+ }
+
+ bytes = num_bytes + *delayed_refs_bytes + extra_delayed_refs_bytes;
+
+ /*
+ * We want to reserve all the bytes we may need all at once, so we only
+ * do 1 enospc flushing cycle per transaction start.
+ */
+ ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
+ if (ret == 0) {
+ if (extra_delayed_refs_bytes > 0)
+ btrfs_migrate_to_delayed_refs_rsv(fs_info,
+ extra_delayed_refs_bytes);
+ return 0;
+ }
+
+ if (extra_delayed_refs_bytes > 0) {
+ bytes -= extra_delayed_refs_bytes;
+ ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
+ if (ret == 0)
+ return 0;
+ }
+
+ /*
+ * If we are an emergency flush, which can steal from the global block
+ * reserve, then attempt to not reserve space for the delayed refs, as
+ * we will consume space for them from the global block reserve.
+ */
+ if (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
+ bytes -= *delayed_refs_bytes;
+ *delayed_refs_bytes = 0;
+ ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
+ }
+
+ return ret;
+}
+
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
unsigned int type, enum btrfs_reserve_flush_enum flush,
@@ -568,10 +631,12 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
u64 qgroup_reserved = 0;
+ u64 delayed_refs_bytes = 0;
bool reloc_reserved = false;
bool do_chunk_alloc = false;
int ret;
@@ -594,9 +659,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* the appropriate flushing if need be.
*/
if (num_items && root != fs_info->chunk_root) {
- struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
- u64 delayed_refs_bytes = 0;
-
qgroup_reserved = num_items * fs_info->nodesize;
/*
* Use prealloc for now, as there might be a currently running
@@ -608,20 +670,16 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
if (ret)
return ERR_PTR(ret);
+ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
/*
- * We want to reserve all the bytes we may need all at once, so
- * we only do 1 enospc flushing cycle per transaction start. We
- * accomplish this by simply assuming we'll do num_items worth
- * of delayed refs updates in this trans handle, and refill that
- * amount for whatever is missing in the reserve.
+ * If we plan to insert/update/delete "num_items" from a btree,
+ * we will also generate delayed refs for extent buffers in the
+ * respective btree paths, so reserve space for the delayed refs
+ * that will be generated by the caller as it modifies btrees.
+ * Try to reserve them to avoid excessive use of the global
+ * block reserve.
*/
- num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
- if (flush == BTRFS_RESERVE_FLUSH_ALL &&
- !btrfs_block_rsv_full(delayed_refs_rsv)) {
- delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
- num_items);
- num_bytes += delayed_refs_bytes;
- }
+ delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);
/*
* Do the reservation for the relocation root creation
@@ -631,16 +689,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
reloc_reserved = true;
}
- ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
+ ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
+ &delayed_refs_bytes);
if (ret)
goto reserve_fail;
- if (delayed_refs_bytes) {
- btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
- num_bytes -= delayed_refs_bytes;
- }
- btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
- if (rsv->space_info->force_alloc)
+ btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);
+
+ if (trans_rsv->space_info->force_alloc)
do_chunk_alloc = true;
} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
!btrfs_block_rsv_full(delayed_refs_rsv)) {
@@ -700,6 +756,7 @@ again:
h->type = type;
INIT_LIST_HEAD(&h->new_bgs);
+ btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS);
smp_mb();
if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
@@ -712,8 +769,17 @@ again:
if (num_bytes) {
trace_btrfs_space_reservation(fs_info, "transaction",
h->transid, num_bytes, 1);
- h->block_rsv = &fs_info->trans_block_rsv;
+ h->block_rsv = trans_rsv;
h->bytes_reserved = num_bytes;
+ if (delayed_refs_bytes > 0) {
+ trace_btrfs_space_reservation(fs_info,
+ "local_delayed_refs_rsv",
+ h->transid,
+ delayed_refs_bytes, 1);
+ h->delayed_refs_bytes_reserved = delayed_refs_bytes;
+ btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
+ delayed_refs_bytes = 0;
+ }
h->reloc_reserved = reloc_reserved;
}
@@ -769,8 +835,10 @@ join_fail:
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
if (num_bytes)
- btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
- num_bytes, NULL);
+ btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
+ if (delayed_refs_bytes)
+ btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info,
+ delayed_refs_bytes);
reserve_fail:
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
@@ -817,7 +885,7 @@ struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *roo
}
/*
- * btrfs_attach_transaction() - catch the running transaction
+ * Catch the running transaction.
*
* It is used when we want to commit the current transaction, but
* don't want to start a new one.
@@ -836,7 +904,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
}
/*
- * btrfs_attach_transaction_barrier() - catch the running transaction
+ * Catch the running transaction.
*
* It is similar to the above function; the difference is that this one
* will wait for all the inactive transactions until they fully
@@ -912,7 +980,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
int ret = 0;
if (transid) {
- if (transid <= fs_info->last_trans_committed)
+ if (transid <= btrfs_get_last_trans_committed(fs_info))
goto out;
/* find specified transaction */
@@ -936,7 +1004,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
* raced with btrfs_commit_transaction
*/
if (!cur_trans) {
- if (transid > fs_info->last_trans_committed)
+ if (transid > btrfs_get_last_trans_committed(fs_info))
ret = -EINVAL;
goto out;
}
@@ -991,11 +1059,14 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
if (!trans->block_rsv) {
ASSERT(!trans->bytes_reserved);
+ ASSERT(!trans->delayed_refs_bytes_reserved);
return;
}
- if (!trans->bytes_reserved)
+ if (!trans->bytes_reserved) {
+ ASSERT(!trans->delayed_refs_bytes_reserved);
return;
+ }
ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
trace_btrfs_space_reservation(fs_info, "transaction",
@@ -1003,6 +1074,16 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
btrfs_block_rsv_release(fs_info, trans->block_rsv,
trans->bytes_reserved, NULL);
trans->bytes_reserved = 0;
+
+ if (!trans->delayed_refs_bytes_reserved)
+ return;
+
+ trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
+ trans->transid,
+ trans->delayed_refs_bytes_reserved, 0);
+ btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
+ trans->delayed_refs_bytes_reserved, NULL);
+ trans->delayed_refs_bytes_reserved = 0;
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
@@ -1334,7 +1415,7 @@ again:
}
/* Now flush any delayed refs generated by updating all of the roots */
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
return ret;
@@ -1349,7 +1430,7 @@ again:
* so we want to keep this flushing in this loop to make sure
* everything gets run.
*/
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
return ret;
}
@@ -1484,45 +1565,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
}
/*
- * defrag a given btree.
- * Every leaf in the btree is read and defragged.
- */
-int btrfs_defrag_root(struct btrfs_root *root)
-{
- struct btrfs_fs_info *info = root->fs_info;
- struct btrfs_trans_handle *trans;
- int ret;
-
- if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
- return 0;
-
- while (1) {
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- break;
- }
-
- ret = btrfs_defrag_leaves(trans, root);
-
- btrfs_end_transaction(trans);
- btrfs_btree_balance_dirty(info);
- cond_resched();
-
- if (btrfs_fs_closing(info) || ret != -EAGAIN)
- break;
-
- if (btrfs_defrag_cancelled(info)) {
- btrfs_debug(info, "defrag_root cancelled");
- ret = -EAGAIN;
- break;
- }
- }
- clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
- return ret;
-}
-
-/*
* Do all special snapshot related qgroup dirty hack.
*
* Will do all needed qgroup inherit and dirty hack like switch commit
@@ -1539,11 +1581,10 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
int ret;
/*
- * Save some performance in the case that qgroups are not
- * enabled. If this check races with the ioctl, rescan will
- * kick in anyway.
+ * Save some performance in the case that qgroups are not enabled. If
+ * this check races with the ioctl, rescan will kick in anyway.
*/
- if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+ if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
/*
@@ -1567,7 +1608,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* for now flush the delayed refs to narrow the race window where the
* qgroup counters could end up wrong.
*/
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret) {
btrfs_abort_transaction(trans, ret);
return ret;
@@ -1582,7 +1623,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
/* Now qgroup are all updated, we can inherit it to new qgroups */
ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
- inherit);
+ parent->root_key.objectid, inherit);
if (ret < 0)
goto out;
@@ -1732,6 +1773,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
+ ret = btrfs_create_qgroup(trans, objectid);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto fail;
+ }
+
/*
* pull in the delayed directory update
* and the delayed inode item
@@ -1843,8 +1890,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* To co-operate with that hack, we do hack again.
* Or snapshot will be greatly slowed down by a subtree qgroup rescan
*/
- ret = qgroup_account_snapshot(trans, root, parent_root,
- pending->inherit, objectid);
+ if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
+ ret = qgroup_account_snapshot(trans, root, parent_root,
+ pending->inherit, objectid);
+ else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
+ ret = btrfs_qgroup_inherit(trans, root->root_key.objectid, objectid,
+ parent_root->root_key.objectid, pending->inherit);
if (ret < 0)
goto fail;
@@ -1860,8 +1911,9 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
fname.disk_name.len * 2);
- parent_inode->i_mtime = inode_set_ctime_current(parent_inode);
- ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
+ inode_set_mtime_to_ts(parent_inode,
+ inode_set_ctime_current(parent_inode));
+ ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -2084,7 +2136,7 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_block_group *block_group, *tmp;
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
- btrfs_delayed_refs_rsv_release(fs_info, 1);
+ btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
list_del_init(&block_group->bg_list);
}
}
@@ -2403,7 +2455,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto unlock_reloc;
- ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+ ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
goto unlock_reloc;
@@ -2536,7 +2588,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(fs_info);
- fs_info->last_trans_committed = cur_trans->transid;
+ btrfs_set_last_trans_committed(fs_info, cur_trans->transid);
/*
* We needn't acquire the lock here because there is no other task
* which can change it.
@@ -2654,18 +2706,18 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
*/
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *function,
- unsigned int line, int errno, bool first_hit)
+ unsigned int line, int error, bool first_hit)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- WRITE_ONCE(trans->aborted, errno);
- WRITE_ONCE(trans->transaction->aborted, errno);
- if (first_hit && errno == -ENOSPC)
+ WRITE_ONCE(trans->aborted, error);
+ WRITE_ONCE(trans->transaction->aborted, error);
+ if (first_hit && error == -ENOSPC)
btrfs_dump_space_info_for_trans_abort(fs_info);
/* Wake up anybody who may be waiting on this transaction */
wake_up(&fs_info->transaction_wait);
wake_up(&fs_info->transaction_blocked_wait);
- __btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
+ __btrfs_handle_fs_error(fs_info, function, line, error, NULL);
}
int __init btrfs_transaction_init(void)
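Several hunks in this file replace direct loads and stores of fs_info->generation and fs_info->last_trans_committed with get/set helpers whose bodies are not part of this excerpt. Presumably they are READ_ONCE()/WRITE_ONCE() wrappers so lockless readers get a torn-free value; roughly:

/* Sketch only: the real definitions live elsewhere in the series. */
static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_trans_committed);
}

static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info,
						  u64 gen)
{
	WRITE_ONCE(fs_info->last_trans_committed, gen);
}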
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 93869cda6af9..18c4f6e83b78 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -118,8 +118,10 @@ enum {
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
+ u64 delayed_refs_bytes_reserved;
u64 chunk_bytes_reserved;
unsigned long delayed_ref_updates;
+ unsigned long delayed_ref_csum_deletions;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
@@ -139,6 +141,7 @@ struct btrfs_trans_handle {
bool in_fsync;
struct btrfs_fs_info *fs_info;
struct list_head new_bgs;
+ struct btrfs_block_rsv delayed_rsv;
};
/*
@@ -172,7 +175,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
{
spin_lock(&inode->lock);
inode->last_trans = trans->transaction->transid;
- inode->last_sub_trans = inode->root->log_transid;
+ inode->last_sub_trans = btrfs_get_root_log_transid(inode->root);
inode->last_log_commit = inode->last_sub_trans - 1;
spin_unlock(&inode->lock);
}
@@ -200,32 +203,32 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
-bool __cold abort_should_print_stack(int errno);
+bool __cold abort_should_print_stack(int error);
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
* detected, that way the exact stack trace is reported for some errors.
*/
-#define btrfs_abort_transaction(trans, errno) \
+#define btrfs_abort_transaction(trans, error) \
do { \
bool first = false; \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
first = true; \
- if (WARN(abort_should_print_stack(errno), \
+ if (WARN(abort_should_print_stack(error), \
KERN_ERR \
"BTRFS: Transaction aborted (error %d)\n", \
- (errno))) { \
+ (error))) { \
/* Stack trace printed. */ \
} else { \
btrfs_err((trans)->fs_info, \
"Transaction aborted (error %d)", \
- (errno)); \
+ (error)); \
} \
} \
__btrfs_abort_transaction((trans), __func__, \
- __LINE__, (errno), first); \
+ __LINE__, (error), first); \
} while (0)
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
@@ -243,7 +246,6 @@ struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_defrag_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
@@ -264,7 +266,7 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *function,
- unsigned int line, int errno, bool first_hit);
+ unsigned int line, int error, bool first_hit);
int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ab08a0b01311..a416cbea75d1 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -29,6 +29,8 @@
#include "accessors.h"
#include "file-item.h"
#include "inode-item.h"
+#include "dir-item.h"
+#include "raid-stripe-tree.h"
/*
* Error message should follow the following format:
@@ -1465,6 +1467,9 @@ static int check_extent_item(struct extent_buffer *leaf,
}
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
+ case BTRFS_EXTENT_OWNER_REF_KEY:
+ WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
+ break;
default:
extent_err(leaf, slot, "unknown inline ref type: %u",
inline_type);
@@ -1631,6 +1636,44 @@ static int check_inode_ref(struct extent_buffer *leaf,
return 0;
}
+static int check_raid_stripe_extent(const struct extent_buffer *leaf,
+ const struct btrfs_key *key, int slot)
+{
+ struct btrfs_stripe_extent *stripe_extent =
+ btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
+
+ if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
+ generic_err(leaf, slot,
+"invalid key objectid for raid stripe extent, have %llu expect aligned to %u",
+ key->objectid, leaf->fs_info->sectorsize);
+ return -EUCLEAN;
+ }
+
+ if (unlikely(!btrfs_fs_incompat(leaf->fs_info, RAID_STRIPE_TREE))) {
+ generic_err(leaf, slot,
+ "RAID_STRIPE_EXTENT present but RAID_STRIPE_TREE incompat bit unset");
+ return -EUCLEAN;
+ }
+
+ switch (btrfs_stripe_extent_encoding(leaf, stripe_extent)) {
+ case BTRFS_STRIPE_RAID0:
+ case BTRFS_STRIPE_RAID1:
+ case BTRFS_STRIPE_DUP:
+ case BTRFS_STRIPE_RAID10:
+ case BTRFS_STRIPE_RAID5:
+ case BTRFS_STRIPE_RAID6:
+ case BTRFS_STRIPE_RAID1C3:
+ case BTRFS_STRIPE_RAID1C4:
+ break;
+ default:
+ generic_err(leaf, slot, "invalid raid stripe encoding %u",
+ btrfs_stripe_extent_encoding(leaf, stripe_extent));
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
/*
* Common point to switch the item-specific validation.
*/
@@ -1685,6 +1728,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
case BTRFS_EXTENT_DATA_REF_KEY:
ret = check_extent_data_ref(leaf, key, slot);
break;
+ case BTRFS_RAID_STRIPE_KEY:
+ ret = check_raid_stripe_extent(leaf, key, slot);
+ break;
}
if (ret)
@@ -2005,7 +2051,7 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
* So we only checks tree blocks which is read from disk, whose
* generation <= fs_info->last_trans_committed.
*/
- if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
+ if (btrfs_header_generation(eb) > btrfs_get_last_trans_committed(fs_info))
return 0;
/* We have @first_key, so this @eb must have at least one item */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index cbb17b542131..7d6729d9fd2f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -347,8 +347,7 @@ static int process_one_buffer(struct btrfs_root *log,
}
if (wc->pin) {
- ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
- eb->len);
+ ret = btrfs_pin_extent_for_log_replay(wc->trans, eb);
if (ret)
return ret;
@@ -504,9 +503,9 @@ insert:
found_size = btrfs_item_size(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
- btrfs_truncate_item(path, item_size, 1);
+ btrfs_truncate_item(trans, path, item_size, 1);
else if (found_size < item_size)
- btrfs_extend_item(path, item_size - found_size);
+ btrfs_extend_item(trans, path, item_size - found_size);
} else if (ret) {
return ret;
}
@@ -574,7 +573,7 @@ insert:
}
}
no_copy:
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@@ -767,7 +766,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
} else if (ret == 0) {
btrfs_init_generic_ref(&ref,
BTRFS_ADD_DELAYED_REF,
- ins.objectid, ins.offset, 0);
+ ins.objectid, ins.offset, 0,
+ root->root_key.objectid);
btrfs_init_data_ref(&ref,
root->root_key.objectid,
key->objectid, offset, 0, false);
@@ -890,7 +890,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
update_inode:
btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
out:
iput(inode);
return ret;
@@ -1445,7 +1445,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
goto out;
}
@@ -1483,8 +1483,7 @@ out:
return ret;
}
-static int count_inode_extrefs(struct btrfs_root *root,
- struct btrfs_inode *inode, struct btrfs_path *path)
+static int count_inode_extrefs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret = 0;
int name_len;
@@ -1498,8 +1497,8 @@ static int count_inode_extrefs(struct btrfs_root *root,
struct extent_buffer *leaf;
while (1) {
- ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
- &extref, &offset);
+ ret = btrfs_find_one_extref(inode->root, inode_objectid, offset,
+ path, &extref, &offset);
if (ret)
break;
@@ -1527,8 +1526,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
return nlink;
}
-static int count_inode_refs(struct btrfs_root *root,
- struct btrfs_inode *inode, struct btrfs_path *path)
+static int count_inode_refs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret;
struct btrfs_key key;
@@ -1543,7 +1541,7 @@ static int count_inode_refs(struct btrfs_root *root,
key.offset = (u64)-1;
while (1) {
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0);
if (ret < 0)
break;
if (ret > 0) {
@@ -1595,9 +1593,9 @@ process_slot:
* will free the inode.
*/
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
struct inode *inode)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret;
u64 nlink = 0;
@@ -1607,13 +1605,13 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- ret = count_inode_refs(root, BTRFS_I(inode), path);
+ ret = count_inode_refs(BTRFS_I(inode), path);
if (ret < 0)
goto out;
nlink = ret;
- ret = count_inode_extrefs(root, BTRFS_I(inode), path);
+ ret = count_inode_extrefs(BTRFS_I(inode), path);
if (ret < 0)
goto out;
@@ -1623,7 +1621,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
goto out;
}
@@ -1685,7 +1683,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
break;
}
- ret = fixup_inode_link_count(trans, root, inode);
+ ret = fixup_inode_link_count(trans, inode);
iput(inode);
if (ret)
break;
@@ -1732,7 +1730,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
set_nlink(inode, 1);
else
inc_nlink(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
} else if (ret == -EEXIST) {
ret = 0;
}
@@ -1939,7 +1937,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
out:
if (!ret && update_size) {
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
- ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
+ ret = btrfs_update_inode(trans, BTRFS_I(dir));
}
kfree(name.name);
iput(dir);
@@ -2483,7 +2481,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
drop_args.bytes_found);
/* Update the inode's nbytes. */
ret = btrfs_update_inode(wc->trans,
- root, BTRFS_I(inode));
+ BTRFS_I(inode));
}
iput(inode);
if (ret)
@@ -2574,7 +2572,7 @@ static int clean_log_buffer(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(eb);
if (trans) {
- ret = btrfs_pin_reserved_extent(trans, eb->start, eb->len);
+ ret = btrfs_pin_reserved_extent(trans, eb);
if (ret)
return ret;
btrfs_redirty_list_add(trans->transaction, eb);
@@ -2848,10 +2846,9 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
}
/*
- * btrfs_sync_log does sends a given tree log down to the disk and
- * updates the super blocks to record it. When this call is done,
- * you know that any inodes previously logged are safely on disk only
- * if it returns 0.
+ * Sends a given tree log down to the disk and updates the super blocks to
+ * record it. When this call is done, you know that any inodes previously
+ * logged are safely on disk only if it returns 0.
*
* Any other return value means you need to call btrfs_commit_transaction.
* Some of the edge cases for fsyncing directories that have had unlinks
@@ -2961,7 +2958,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_root_node(&log->root_item, log->node);
memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
- root->log_transid++;
+ btrfs_set_root_log_transid(root, root->log_transid + 1);
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
@@ -2999,9 +2996,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
*/
ret = update_log_root(trans, log, &new_root_item);
if (ret) {
- if (!list_empty(&root_log_ctx.list))
- list_del_init(&root_log_ctx.list);
-
+ list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug);
btrfs_set_log_full_commit(trans);
if (ret != -ENOSPC)
@@ -3021,7 +3016,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
- index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
ret = btrfs_wait_tree_log_extents(log, mark);
@@ -3136,8 +3130,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* someone else already started it. We use <= and not < because the
* first log transaction has an ID of 0.
*/
- ASSERT(root->last_log_commit <= log_transid);
- root->last_log_commit = log_transid;
+ ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid);
+ btrfs_set_root_last_log_commit(root, log_transid);
out_wake_log_root:
mutex_lock(&log_root_tree->log_mutex);
@@ -3211,8 +3205,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
}
}
- clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
- EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
+ extent_io_tree_release(&log->dirty_log_pages);
extent_io_tree_release(&log->log_csum_range);
btrfs_put_root(log);
@@ -3530,7 +3523,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
last_offset = max(last_offset, curr_end);
}
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
- btrfs_mark_buffer_dirty(path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@@ -4138,19 +4131,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
btrfs_set_token_timespec_sec(&token, &item->atime,
- inode->i_atime.tv_sec);
+ inode_get_atime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->atime,
- inode->i_atime.tv_nsec);
+ inode_get_atime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->mtime,
- inode->i_mtime.tv_sec);
+ inode_get_mtime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->mtime,
- inode->i_mtime.tv_nsec);
+ inode_get_mtime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->ctime,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
btrfs_set_token_timespec_nsec(&token, &item->ctime,
- inode_get_ctime(inode).tv_nsec);
+ inode_get_ctime_nsec(inode));
/*
* We do not need to set the nbytes field, in fact during a fast fsync
@@ -4488,7 +4481,7 @@ copy_item:
dst_index++;
}
- btrfs_mark_buffer_dirty(dst_path->nodes[0]);
+ btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
btrfs_release_path(dst_path);
out:
kfree(ins_data);
@@ -4693,7 +4686,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, &fi,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(fi));
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
@@ -4921,12 +4914,12 @@ process:
set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
- spin_lock_irq(&inode->ordered_tree.lock);
+ spin_lock_irq(&inode->ordered_tree_lock);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
atomic_inc(&trans->transaction->pending_ordered);
}
- spin_unlock_irq(&inode->ordered_tree.lock);
+ spin_unlock_irq(&inode->ordered_tree_lock);
}
btrfs_put_ordered_extent(ordered);
}
@@ -7204,9 +7197,7 @@ again:
* each subsequent pass.
*/
if (ret == -ENOENT)
- ret = btrfs_pin_extent_for_log_replay(trans,
- log->node->start,
- log->node->len);
+ ret = btrfs_pin_extent_for_log_replay(trans, log->node);
btrfs_put_root(log);
if (!ret)
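fill_inode_item() above now goes through the VFS timestamp accessors instead of dereferencing i_atime/i_mtime directly. On kernels providing these helpers, the _sec/_nsec pairs are equivalent to unpacking the struct timespec64 returned by the aggregate accessor:

struct timespec64 ts = inode_get_mtime(inode);

time64_t sec  = inode_get_mtime_sec(inode);	/* same as ts.tv_sec  */
long     nsec = inode_get_mtime_nsec(inode);	/* same as ts.tv_nsec */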
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 33606025513d..b4ac2b0cd235 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -223,7 +223,8 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
}
/*
- * ulist_del - delete one node from ulist
+ * Delete one node from ulist.
+ *
* @ulist: ulist to remove node from
* @val: value to delete
* @aux: aux to delete
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 7c7001f42b14..5be74f9e47eb 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
- btrfs_extend_item(path, sizeof(subid_le));
+ btrfs_extend_item(trans, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
@@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
ret = 0;
subid_le = cpu_to_le64(subid_cpu);
write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
- btrfs_mark_buffer_dirty(eb);
+ btrfs_mark_buffer_dirty(trans, eb);
out:
btrfs_free_path(path);
@@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
- btrfs_truncate_item(path, item_size - sizeof(subid), 1);
+ btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
out:
btrfs_free_path(path);
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index 744f4f4d4c68..66e2270b0dae 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -487,7 +487,7 @@ static int rollback_verity(struct btrfs_inode *inode)
}
inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -554,7 +554,7 @@ static int finish_verity(struct btrfs_inode *inode, const void *desc,
}
inode->ro_flags |= BTRFS_INODE_RO_VERITY;
btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
- ret = btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, inode);
if (ret)
goto end_trans;
ret = del_orphan(trans, inode);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5a5a8d488a7b..c87e18827a0a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -35,6 +35,7 @@
#include "relocation.h"
#include "scrub.h"
#include "super.h"
+#include "raid-stripe-tree.h"
#define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID10 | \
@@ -357,21 +358,19 @@ struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
}
/*
- * alloc_fs_devices - allocate struct btrfs_fs_devices
- * @fsid: if not NULL, copy the UUID to fs_devices::fsid
- * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
+ * Allocate a new btrfs_fs_devices structure identified by a fsid.
+ *
+ * @fsid: if not NULL, copy the UUID to fs_devices::fsid and to
+ * fs_devices::metadata_fsid
*
* Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
* The returned struct is not linked onto any lists and can be destroyed with
* kfree() right away.
*/
-static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
- const u8 *metadata_fsid)
+static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
struct btrfs_fs_devices *fs_devs;
- ASSERT(fsid || !metadata_fsid);
-
fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
if (!fs_devs)
return ERR_PTR(-ENOMEM);
@@ -385,8 +384,7 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
if (fsid) {
memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
- memcpy(fs_devs->metadata_uuid,
- metadata_fsid ?: fsid, BTRFS_FSID_SIZE);
+ memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
}
return fs_devs;
@@ -457,91 +455,41 @@ static noinline struct btrfs_fs_devices *find_fsid(
return NULL;
}
-/*
- * First check if the metadata_uuid is different from the fsid in the given
- * fs_devices. Then check if the given fsid is the same as the metadata_uuid
- * in the fs_devices. If it is, return true; otherwise, return false.
- */
-static inline bool check_fsid_changed(const struct btrfs_fs_devices *fs_devices,
- const u8 *fsid)
-{
- return memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
- BTRFS_FSID_SIZE) != 0 &&
- memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE) == 0;
-}
-
-static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
- struct btrfs_super_block *disk_super)
-{
-
- struct btrfs_fs_devices *fs_devices;
-
- /*
- * Handle scanned device having completed its fsid change but
- * belonging to a fs_devices that was created by first scanning
- * a device which didn't have its fsid/metadata_uuid changed
- * at all and the CHANGING_FSID_V2 flag set.
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (!fs_devices->fsid_change)
- continue;
-
- if (match_fsid_fs_devices(fs_devices, disk_super->metadata_uuid,
- fs_devices->fsid))
- return fs_devices;
- }
-
- /*
- * Handle scanned device having completed its fsid change but
- * belonging to a fs_devices that was created by a device that
- * has an outdated pair of fsid/metadata_uuid and
- * CHANGING_FSID_V2 flag set.
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (!fs_devices->fsid_change)
- continue;
-
- if (check_fsid_changed(fs_devices, disk_super->metadata_uuid))
- return fs_devices;
- }
-
- return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
-}
-
-
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
- int flush, struct block_device **bdev,
+ int flush, struct bdev_handle **bdev_handle,
struct btrfs_super_block **disk_super)
{
+ struct block_device *bdev;
int ret;
- *bdev = blkdev_get_by_path(device_path, flags, holder, NULL);
+ *bdev_handle = bdev_open_by_path(device_path, flags, holder, NULL);
- if (IS_ERR(*bdev)) {
- ret = PTR_ERR(*bdev);
+ if (IS_ERR(*bdev_handle)) {
+ ret = PTR_ERR(*bdev_handle);
goto error;
}
+ bdev = (*bdev_handle)->bdev;
if (flush)
- sync_blockdev(*bdev);
- ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
+ sync_blockdev(bdev);
+ ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE);
if (ret) {
- blkdev_put(*bdev, holder);
+ bdev_release(*bdev_handle);
goto error;
}
- invalidate_bdev(*bdev);
- *disk_super = btrfs_read_dev_super(*bdev);
+ invalidate_bdev(bdev);
+ *disk_super = btrfs_read_dev_super(bdev);
if (IS_ERR(*disk_super)) {
ret = PTR_ERR(*disk_super);
- blkdev_put(*bdev, holder);
+ bdev_release(*bdev_handle);
goto error;
}
return 0;
error:
- *bdev = NULL;
+ *bdev_handle = NULL;
return ret;
}
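
Every open in this file now follows the same handle-based pairing. A minimal sketch, assuming only the bdev_handle API as used in the hunk above:

	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ, holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* The raw block_device is reached through the handle. */
	sync_blockdev(handle->bdev);

	/* One release call replaces blkdev_put(bdev, holder). */
	bdev_release(handle);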
@@ -562,13 +510,13 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
{
struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
struct btrfs_device *device, *tmp_device;
- int ret = 0;
+ int ret;
+ bool freed = false;
lockdep_assert_held(&uuid_mutex);
- if (devt)
- ret = -ENOENT;
-
+ /* Return good status if there is no instance of devt. */
+ ret = 0;
list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
mutex_lock(&fs_devices->device_list_mutex);
@@ -579,8 +527,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
if (devt && devt != device->devt)
continue;
if (fs_devices->opened) {
- /* for an already deleted device return 0 */
- if (devt && ret != 0)
+ if (devt)
ret = -EBUSY;
break;
}
@@ -590,7 +537,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
list_del(&device->dev_list);
btrfs_free_device(device);
- ret = 0;
+ freed = true;
}
mutex_unlock(&fs_devices->device_list_mutex);
@@ -601,9 +548,81 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
}
}
+ /* If at least one device was freed, return 0. */
+ if (freed)
+ return 0;
+
return ret;
}
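
The reworked error handling gives btrfs_free_stale_devices() the following return semantics; a summary sketch:

/*
 * btrfs_free_stale_devices(devt, skip) after this change:
 *   0       devt is not registered at all, or at least one stale
 *           (unopened) instance of it was freed
 *   -EBUSY  devt was found but its fs_devices is currently opened
 */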
+static struct btrfs_fs_devices *find_fsid_by_device(
+ struct btrfs_super_block *disk_super,
+ dev_t devt, bool *same_fsid_diff_dev)
+{
+ struct btrfs_fs_devices *fsid_fs_devices;
+ struct btrfs_fs_devices *devt_fs_devices;
+ const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
+ bool found_by_devt = false;
+
+ /* Find the fs_devices by the usual method; if found, use it. */
+ fsid_fs_devices = find_fsid(disk_super->fsid,
+ has_metadata_uuid ? disk_super->metadata_uuid : NULL);
+
+ /* The temp_fsid feature is supported only with a single-device filesystem. */
+ if (btrfs_super_num_devices(disk_super) != 1)
+ return fsid_fs_devices;
+
+ /*
+ * A seed device is an integral component of the sprout device, which
+ * functions as a multi-device filesystem, so the temp-fsid feature
+ * is not supported.
+ */
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
+ return fsid_fs_devices;
+
+ /* Try to find a fs_devices by matching devt. */
+ list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
+ struct btrfs_device *device;
+
+ list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
+ if (device->devt == devt) {
+ found_by_devt = true;
+ break;
+ }
+ }
+ if (found_by_devt)
+ break;
+ }
+
+ if (found_by_devt) {
+ /* Existing device. */
+ if (fsid_fs_devices == NULL) {
+ if (devt_fs_devices->opened == 0) {
+ /* Stale device. */
+ return NULL;
+ } else {
+ /* temp_fsid is mounting a subvol. */
+ return devt_fs_devices;
+ }
+ } else {
+ /* Regular or temp_fsid device mounting a subvol. */
+ return devt_fs_devices;
+ }
+ } else {
+ /* New device. */
+ if (fsid_fs_devices == NULL) {
+ return NULL;
+ } else {
+ /* sb::fsid is already in use, create a new temp_fsid. */
+ *same_fsid_diff_dev = true;
+ return NULL;
+ }
+ }
+
+ /* Not reached. */
+}
+
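
The devt/fsid lookup above reduces to a small decision table; a summary sketch of the outcomes:

/*
 * found_by_devt | fsid match | result
 * --------------+------------+--------------------------------------
 *     yes       |   yes      | devt_fs_devices (existing filesystem)
 *     yes       |   no       | NULL if the fs_devices is not opened
 *               |            | (stale device), else devt_fs_devices
 *     no        |   yes      | NULL, *same_fsid_diff_dev = true, the
 *               |            | caller generates a temp-fsid
 *     no        |   no       | NULL, caller allocates fresh fs_devices
 */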
/*
* This is only used on mount, and we are protected from competing things
* messing with our fs_devices by the uuid_mutex, thus we do not need the
@@ -613,7 +632,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device, blk_mode_t flags,
void *holder)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct btrfs_super_block *disk_super;
u64 devid;
int ret;
@@ -624,7 +643,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
return -EINVAL;
ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
- &bdev, &disk_super);
+ &bdev_handle, &disk_super);
if (ret)
return ret;
@@ -648,21 +667,21 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
fs_devices->seeding = true;
} else {
- if (bdev_read_only(bdev))
+ if (bdev_read_only(bdev_handle->bdev))
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
else
set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
}
- if (!bdev_nonrot(bdev))
+ if (!bdev_nonrot(bdev_handle->bdev))
fs_devices->rotating = true;
- if (bdev_max_discard_sectors(bdev))
+ if (bdev_max_discard_sectors(bdev_handle->bdev))
fs_devices->discardable = true;
- device->bdev = bdev;
+ device->bdev_handle = bdev_handle;
+ device->bdev = bdev_handle->bdev;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
- device->holder = holder;
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
@@ -676,7 +695,7 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
error_free_page:
btrfs_release_disk_super(disk_super);
- blkdev_put(bdev, holder);
+ bdev_release(bdev_handle);
return -EINVAL;
}
@@ -690,84 +709,6 @@ u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
}
/*
- * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
- * being created with a disk that has already completed its fsid change. Such
- * disk can belong to an fs which has its FSID changed or to one which doesn't.
- * Handle both cases here.
- */
-static struct btrfs_fs_devices *find_fsid_inprogress(
- struct btrfs_super_block *disk_super)
-{
- struct btrfs_fs_devices *fs_devices;
-
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (fs_devices->fsid_change)
- continue;
-
- if (check_fsid_changed(fs_devices, disk_super->fsid))
- return fs_devices;
- }
-
- return find_fsid(disk_super->fsid, NULL);
-}
-
-static struct btrfs_fs_devices *find_fsid_changed(
- struct btrfs_super_block *disk_super)
-{
- struct btrfs_fs_devices *fs_devices;
-
- /*
- * Handles the case where scanned device is part of an fs that had
- * multiple successful changes of FSID but currently device didn't
- * observe it. Meaning our fsid will be different than theirs. We need
- * to handle two subcases :
- * 1 - The fs still continues to have different METADATA/FSID uuids.
- * 2 - The fs is switched back to its original FSID (METADATA/FSID
- * are equal).
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- /* Changed UUIDs */
- if (check_fsid_changed(fs_devices, disk_super->metadata_uuid) &&
- memcmp(fs_devices->fsid, disk_super->fsid,
- BTRFS_FSID_SIZE) != 0)
- return fs_devices;
-
- /* Unchanged UUIDs */
- if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
- BTRFS_FSID_SIZE) == 0 &&
- memcmp(fs_devices->fsid, disk_super->metadata_uuid,
- BTRFS_FSID_SIZE) == 0)
- return fs_devices;
- }
-
- return NULL;
-}
-
-static struct btrfs_fs_devices *find_fsid_reverted_metadata(
- struct btrfs_super_block *disk_super)
-{
- struct btrfs_fs_devices *fs_devices;
-
- /*
- * Handle the case where the scanned device is part of an fs whose last
- * metadata UUID change reverted it to the original FSID. At the same
- * time fs_devices was first created by another constituent device
- * which didn't fully observe the operation. This results in an
- * btrfs_fs_devices created with metadata/fsid different AND
- * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
- * fs_devices equal to the FSID of the disk.
- */
- list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (!fs_devices->fsid_change)
- continue;
-
- if (check_fsid_changed(fs_devices, disk_super->fsid))
- return fs_devices;
- }
-
- return NULL;
-}
-/*
* Add new device to list of registered devices
*
* Returns:
@@ -785,10 +726,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_t path_devt;
int error;
+ bool same_fsid_diff_dev = false;
bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
- bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
- BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
+
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
+ btrfs_err(NULL,
+"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
+ path);
+ return ERR_PTR(-EAGAIN);
+ }
error = lookup_bdev(path, &path_devt);
if (error) {
@@ -797,27 +744,23 @@ static noinline struct btrfs_device *device_list_add(const char *path,
return ERR_PTR(error);
}
- if (fsid_change_in_progress) {
- if (!has_metadata_uuid)
- fs_devices = find_fsid_inprogress(disk_super);
- else
- fs_devices = find_fsid_changed(disk_super);
- } else if (has_metadata_uuid) {
- fs_devices = find_fsid_with_metadata_uuid(disk_super);
- } else {
- fs_devices = find_fsid_reverted_metadata(disk_super);
- if (!fs_devices)
- fs_devices = find_fsid(disk_super->fsid, NULL);
- }
-
+ fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);
if (!fs_devices) {
- fs_devices = alloc_fs_devices(disk_super->fsid,
- has_metadata_uuid ? disk_super->metadata_uuid : NULL);
+ fs_devices = alloc_fs_devices(disk_super->fsid);
+ if (has_metadata_uuid)
+ memcpy(fs_devices->metadata_uuid,
+ disk_super->metadata_uuid, BTRFS_FSID_SIZE);
+
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
- fs_devices->fsid_change = fsid_change_in_progress;
+ if (same_fsid_diff_dev) {
+ generate_random_uuid(fs_devices->fsid);
+ fs_devices->temp_fsid = true;
+ pr_info("BTRFS: device %s using temp-fsid %pU\n",
+ path, fs_devices->fsid);
+ }
mutex_lock(&fs_devices->device_list_mutex);
list_add(&fs_devices->fs_list, &fs_uuids);
@@ -832,18 +775,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
mutex_lock(&fs_devices->device_list_mutex);
device = btrfs_find_device(fs_devices, &args);
- /*
- * If this disk has been pulled into an fs devices created by
- * a device which had the CHANGING_FSID_V2 flag then replace the
- * metadata_uuid/fsid values of the fs_devices.
- */
- if (fs_devices->fsid_change &&
- found_transid > fs_devices->latest_generation) {
+ if (found_transid > fs_devices->latest_generation) {
memcpy(fs_devices->fsid, disk_super->fsid,
BTRFS_FSID_SIZE);
memcpy(fs_devices->metadata_uuid,
btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
- fs_devices->fsid_change = false;
}
}
@@ -997,7 +933,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
lockdep_assert_held(&uuid_mutex);
- fs_devices = alloc_fs_devices(orig->fsid, NULL);
+ fs_devices = alloc_fs_devices(orig->fsid);
if (IS_ERR(fs_devices))
return fs_devices;
@@ -1068,9 +1004,10 @@ static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
if (device->devid == BTRFS_DEV_REPLACE_DEVID)
continue;
- if (device->bdev) {
- blkdev_put(device->bdev, device->holder);
+ if (device->bdev_handle) {
+ bdev_release(device->bdev_handle);
device->bdev = NULL;
+ device->bdev_handle = NULL;
fs_devices->open_devices--;
}
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
@@ -1115,7 +1052,7 @@ static void btrfs_close_bdev(struct btrfs_device *device)
invalidate_bdev(device->bdev);
}
- blkdev_put(device->bdev, device->holder);
+ bdev_release(device->bdev_handle);
}
static void btrfs_close_one_device(struct btrfs_device *device)
@@ -1356,14 +1293,19 @@ int btrfs_forget_devices(dev_t devt)
/*
* Look for a btrfs signature on a device. This may be called out of the mount path
* and we are not allowed to call set_blocksize during the scan. The superblock
- * is read via pagecache
+ * is read via pagecache.
+ *
+ * With @mount_arg_dev set, this is a scan during mount time that will
+ * always register the device or return an error. Multi-device and
+ * seeding devices are registered in either case.
*/
-struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags)
+struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ bool mount_arg_dev)
{
struct btrfs_super_block *disk_super;
bool new_device_added = false;
struct btrfs_device *device = NULL;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
u64 bytenr, bytenr_orig;
int ret;
@@ -1386,31 +1328,49 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags)
* values temporarily, as the device paths of the fsid are the only
* required information for assembling the volume.
*/
- bdev = blkdev_get_by_path(path, flags, NULL, NULL);
- if (IS_ERR(bdev))
- return ERR_CAST(bdev);
+ bdev_handle = bdev_open_by_path(path, flags, NULL, NULL);
+ if (IS_ERR(bdev_handle))
+ return ERR_CAST(bdev_handle);
bytenr_orig = btrfs_sb_offset(0);
- ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
+ ret = btrfs_sb_log_location_bdev(bdev_handle->bdev, 0, READ, &bytenr);
if (ret) {
device = ERR_PTR(ret);
goto error_bdev_put;
}
- disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
+ disk_super = btrfs_read_disk_super(bdev_handle->bdev, bytenr,
+ bytenr_orig);
if (IS_ERR(disk_super)) {
device = ERR_CAST(disk_super);
goto error_bdev_put;
}
+ if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
+ !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)) {
+ dev_t devt;
+
+ ret = lookup_bdev(path, &devt);
+ if (ret)
+ btrfs_warn(NULL, "lookup bdev failed for path %s: %d",
+ path, ret);
+ else
+ btrfs_free_stale_devices(devt, NULL);
+
+ pr_debug("BTRFS: skip registering single non-seed device %s\n", path);
+ device = NULL;
+ goto free_disk_super;
+ }
+
device = device_list_add(path, disk_super, &new_device_added);
if (!IS_ERR(device) && new_device_added)
btrfs_free_stale_devices(device->devt, device);
+free_disk_super:
btrfs_release_disk_super(disk_super);
error_bdev_put:
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
return device;
}
@@ -1894,7 +1854,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_fsid(dev_item);
write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
ptr, BTRFS_FSID_SIZE);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@@ -2087,7 +2047,7 @@ void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
- struct block_device **bdev, void **holder)
+ struct bdev_handle **bdev_handle)
{
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
@@ -2196,7 +2156,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
btrfs_assign_next_active_device(device, NULL);
- if (device->bdev) {
+ if (device->bdev_handle) {
cur_devices->open_devices--;
/* remove sysfs entry */
btrfs_sysfs_remove_device(device);
@@ -2212,9 +2172,9 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
* free the device.
*
* We cannot call btrfs_close_bdev() here because we're holding the sb
- * write lock, and blkdev_put() will pull in the ->open_mutex on the
- * block device and it's dependencies. Instead just flush the device
- * and let the caller do the final blkdev_put.
+ * write lock, and bdev_release() will pull in the ->open_mutex on
+ * the block device and its dependencies. Instead just flush the
+ * device and let the caller do the final bdev_release.
*/
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_scratch_superblocks(fs_info, device->bdev,
@@ -2225,8 +2185,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
}
}
- *bdev = device->bdev;
- *holder = device->holder;
+ *bdev_handle = device->bdev_handle;
synchronize_rcu();
btrfs_free_device(device);
@@ -2363,7 +2322,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
const char *path)
{
struct btrfs_super_block *disk_super;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int ret;
if (!path || !path[0])
@@ -2381,7 +2340,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
}
ret = btrfs_get_bdev_and_sb(path, BLK_OPEN_READ, NULL, 0,
- &bdev, &disk_super);
+ &bdev_handle, &disk_super);
if (ret) {
btrfs_put_dev_args_from_path(args);
return ret;
@@ -2394,7 +2353,7 @@ int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
else
memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
btrfs_release_disk_super(disk_super);
- blkdev_put(bdev, NULL);
+ bdev_release(bdev_handle);
return 0;
}
@@ -2451,7 +2410,7 @@ static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
* Private copy of the seed devices, anchored at
* fs_info->fs_devices->seed_list
*/
- seed_devices = alloc_fs_devices(NULL, NULL);
+ seed_devices = alloc_fs_devices(NULL);
if (IS_ERR(seed_devices))
return seed_devices;
@@ -2597,7 +2556,7 @@ next_slot:
if (device->fs_devices->seeding) {
btrfs_set_device_generation(leaf, dev_item,
device->generation);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
}
path->slots[0]++;
@@ -2614,7 +2573,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
struct btrfs_device *device;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct super_block *sb = fs_info->sb;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *seed_devices = NULL;
@@ -2627,12 +2586,12 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
if (sb_rdonly(sb) && !fs_devices->seeding)
return -EROFS;
- bdev = blkdev_get_by_path(device_path, BLK_OPEN_WRITE,
- fs_info->bdev_holder, NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_path(device_path, BLK_OPEN_WRITE,
+ fs_info->bdev_holder, NULL);
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
- if (!btrfs_check_device_zone_type(fs_info, bdev)) {
+ if (!btrfs_check_device_zone_type(fs_info, bdev_handle->bdev)) {
ret = -EINVAL;
goto error;
}
@@ -2644,11 +2603,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
locked = true;
}
- sync_blockdev(bdev);
+ sync_blockdev(bdev_handle->bdev);
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
- if (device->bdev == bdev) {
+ if (device->bdev == bdev_handle->bdev) {
ret = -EEXIST;
rcu_read_unlock();
goto error;
@@ -2664,7 +2623,8 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
}
device->fs_info = fs_info;
- device->bdev = bdev;
+ device->bdev_handle = bdev_handle;
+ device->bdev = bdev_handle->bdev;
ret = lookup_bdev(device_path, &device->devt);
if (ret)
goto error_free_device;
@@ -2685,12 +2645,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
device->io_align = fs_info->sectorsize;
device->sector_size = fs_info->sectorsize;
device->total_bytes =
- round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
+ round_down(bdev_nr_bytes(device->bdev), fs_info->sectorsize);
device->disk_total_bytes = device->total_bytes;
device->commit_total_bytes = device->total_bytes;
set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
- device->holder = fs_info->bdev_holder;
device->dev_stats_valid = 1;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
@@ -2726,7 +2685,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
- if (!bdev_nonrot(bdev))
+ if (!bdev_nonrot(device->bdev))
fs_devices->rotating = true;
orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
@@ -2848,7 +2807,7 @@ error_free_zone:
error_free_device:
btrfs_free_device(device);
error:
- blkdev_put(bdev, fs_info->bdev_holder);
+ bdev_release(bdev_handle);
if (locked) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);
@@ -2895,7 +2854,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
btrfs_device_get_disk_total_bytes(device));
btrfs_set_device_bytes_used(leaf, dev_item,
btrfs_device_get_bytes_used(device));
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
@@ -2929,6 +2888,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
btrfs_set_super_total_bytes(super_copy,
round_down(old_total + diff, fs_info->sectorsize));
device->fs_devices->total_rw_bytes += diff;
+ atomic64_add(diff, &fs_info->free_chunk_space);
btrfs_device_set_total_bytes(device, new_size);
btrfs_device_set_disk_total_bytes(device, new_size);
@@ -3027,7 +2987,8 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
}
/*
- * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
+ * Find the mapping containing the given logical extent.
+ *
* @logical: Logical block offset in bytes.
* @length: Length of extent in bytes.
*
@@ -3483,7 +3444,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
btrfs_set_balance_flags(leaf, item, bctl->flags);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
err = btrfs_commit_transaction(trans);
@@ -4838,6 +4799,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
u64 old_size = btrfs_device_get_total_bytes(device);
u64 diff;
u64 start;
+ u64 free_diff = 0;
new_size = round_down(new_size, fs_info->sectorsize);
start = new_size;
@@ -4863,7 +4825,19 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
btrfs_device_set_total_bytes(device, new_size);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes -= diff;
- atomic64_sub(diff, &fs_info->free_chunk_space);
+
+ /*
+ * The new free_chunk_space is new_size - used, so we have to
+ * subtract the delta of the old free_chunk_space, which included
+ * old_size - used. If used > new_size then just subtract this
+ * entire device's free space.
+ */
+ if (device->bytes_used < new_size)
+ free_diff = (old_size - device->bytes_used) -
+ (new_size - device->bytes_used);
+ else
+ free_diff = old_size - device->bytes_used;
+ atomic64_sub(free_diff, &fs_info->free_chunk_space);
}
/*
@@ -4998,9 +4972,10 @@ done:
if (ret) {
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, old_size);
- if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
+ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes += diff;
- atomic64_add(diff, &fs_info->free_chunk_space);
+ atomic64_add(free_diff, &fs_info->free_chunk_space);
+ }
mutex_unlock(&fs_info->chunk_mutex);
}
return ret;
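
A worked sketch of the free space delta computed above, with illustrative sizes:

/*
 * old_size = 100G, new_size = 60G:
 *   used = 40G (< new_size):
 *     free_diff = (100G - 40G) - (60G - 40G) = 40G  (old_size - new_size)
 *   used = 80G (>= new_size):
 *     free_diff = 100G - 80G = 20G  (all remaining free space on the device)
 */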
@@ -5109,7 +5084,7 @@ static void init_alloc_chunk_ctl_policy_regular(
ASSERT(space_info);
ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
- ctl->max_stripe_size = ctl->max_chunk_size;
+ ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
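
The new bound caps the stripe size independently of the chunk size; for example:

/*
 * max_chunk_size = 10G  -> max_stripe_size = min(10G, 1G)  = 1G
 * max_chunk_size = 256M -> max_stripe_size = min(256M, 1G) = 256M
 */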
@@ -5880,6 +5855,7 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
}
static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
+ u64 logical,
u16 total_stripes)
{
struct btrfs_io_context *bioc;
@@ -5899,6 +5875,7 @@ static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_
bioc->fs_info = fs_info;
bioc->replace_stripe_src = -1;
bioc->full_stripe_logical = (u64)-1;
+ bioc->logical = logical;
return bioc;
}
@@ -6203,12 +6180,20 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
return U64_MAX;
}
-static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
- u32 stripe_index, u64 stripe_offset, u32 stripe_nr)
+static int set_io_stripe(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
+ u64 logical, u64 *length, struct btrfs_io_stripe *dst,
+ struct map_lookup *map, u32 stripe_index,
+ u64 stripe_offset, u64 stripe_nr)
{
dst->dev = map->stripes[stripe_index].dev;
+
+ if (op == BTRFS_MAP_READ && btrfs_need_stripe_tree_update(fs_info, map->type))
+ return btrfs_get_raid_extent_offset(fs_info, logical, length,
+ map->type, stripe_index, dst);
+
dst->physical = map->stripes[stripe_index].physical +
stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
+ return 0;
}
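
For the non-stripe-tree path, the physical address computed above follows the usual stripe arithmetic; a sketch, assuming btrfs_stripe_nr_to_offset() converts a stripe count to a byte offset:

/*
 * physical = map->stripes[stripe_index].physical
 *          + stripe_offset                  (offset inside one stripe)
 *          + stripe_nr * BTRFS_STRIPE_LEN   (whole stripes on this device)
 */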
/*
@@ -6245,16 +6230,11 @@ static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *
* For RAID6 profile, mirror > 2 means mark another
* data/P stripe error and rebuild from the remaining
* stripes..
- *
- * @need_raid_map: (Used only for integrity checker) whether the map wants
- * a full stripe map (including all data and P/Q stripes)
- * for RAID56. Should always be 1 except integrity checker.
*/
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret,
- struct btrfs_io_stripe *smap, int *mirror_num_ret,
- int need_raid_map)
+ struct btrfs_io_stripe *smap, int *mirror_num_ret)
{
struct extent_map *em;
struct map_lookup *map;
@@ -6349,8 +6329,10 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
}
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
- if (need_raid_map && (op != BTRFS_MAP_READ || mirror_num > 1)) {
+ if (op != BTRFS_MAP_READ || mirror_num > 1) {
/*
+ * Needs full stripe mapping.
+ *
* Push stripe_nr back to the start of the full stripe
* For those cases needing a full stripe, @stripe_nr
* is the full stripe number.
@@ -6373,19 +6355,14 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
stripe_index = 0;
stripe_offset = 0;
} else {
- /*
- * Mirror #0 or #1 means the original data block.
- * Mirror #2 is RAID5 parity block.
- * Mirror #3 is RAID6 Q block.
- */
+ ASSERT(mirror_num <= 1);
+ /* Just grab the data stripe directly. */
stripe_index = stripe_nr % data_stripes;
stripe_nr /= data_stripes;
- if (mirror_num > 1)
- stripe_index = data_stripes + mirror_num - 2;
/* We distribute the parity blocks across stripes */
stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
- if (op == BTRFS_MAP_READ && mirror_num <= 1)
+ if (op == BTRFS_MAP_READ && mirror_num < 1)
mirror_num = 1;
}
} else {
@@ -6424,16 +6401,18 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* I/O context structure.
*/
if (smap && num_alloc_stripes == 1 &&
+ !(btrfs_need_stripe_tree_update(fs_info, map->type) &&
+ op != BTRFS_MAP_READ) &&
!((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)) {
- set_io_stripe(smap, map, stripe_index, stripe_offset, stripe_nr);
+ ret = set_io_stripe(fs_info, op, logical, length, smap, map,
+ stripe_index, stripe_offset, stripe_nr);
if (mirror_num_ret)
*mirror_num_ret = mirror_num;
*bioc_ret = NULL;
- ret = 0;
goto out;
}
- bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes);
+ bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
if (!bioc) {
ret = -ENOMEM;
goto out;
@@ -6447,7 +6426,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
*
* It's still mostly the same as other profiles, just with extra rotation.
*/
- if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
+ if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
(op != BTRFS_MAP_READ || mirror_num > 1)) {
/*
* For RAID56 @stripe_nr is already the number of full stripes
@@ -6459,22 +6438,35 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
*/
bioc->full_stripe_logical = em->start +
btrfs_stripe_nr_to_offset(stripe_nr * data_stripes);
- for (i = 0; i < num_stripes; i++)
- set_io_stripe(&bioc->stripes[i], map,
- (i + stripe_nr) % num_stripes,
- stripe_offset, stripe_nr);
+ for (int i = 0; i < num_stripes; i++) {
+ ret = set_io_stripe(fs_info, op, logical, length,
+ &bioc->stripes[i], map,
+ (i + stripe_nr) % num_stripes,
+ stripe_offset, stripe_nr);
+ if (ret < 0)
+ break;
+ }
} else {
/*
* For all other non-RAID56 profiles, just copy the target
* stripe into the bioc.
*/
for (i = 0; i < num_stripes; i++) {
- set_io_stripe(&bioc->stripes[i], map, stripe_index,
- stripe_offset, stripe_nr);
+ ret = set_io_stripe(fs_info, op, logical, length,
+ &bioc->stripes[i], map, stripe_index,
+ stripe_offset, stripe_nr);
+ if (ret < 0)
+ break;
stripe_index++;
}
}
+ if (ret) {
+ *bioc_ret = NULL;
+ btrfs_put_bioc(bioc);
+ goto out;
+ }
+
if (op != BTRFS_MAP_READ)
max_errors = btrfs_chunk_max_errors(map);
@@ -6901,7 +6893,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
if (!btrfs_test_opt(fs_info, DEGRADED))
return ERR_PTR(-ENOENT);
- fs_devices = alloc_fs_devices(fsid, NULL);
+ fs_devices = alloc_fs_devices(fsid);
if (IS_ERR(fs_devices))
return fs_devices;
@@ -7534,7 +7526,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
btrfs_set_dev_stats_value(eb, ptr, i,
btrfs_dev_stat_read(device, i));
- btrfs_mark_buffer_dirty(eb);
+ btrfs_mark_buffer_dirty(trans, eb);
out:
btrfs_free_path(path);
@@ -8076,7 +8068,7 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
ASSERT(mirror_num > 0);
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
- &bioc, smap, &mirror_ret, true);
+ &bioc, smap, &mirror_ret);
if (ret < 0)
return ret;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2128a032c3b7..9cc374864a79 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -90,13 +90,11 @@ struct btrfs_device {
u64 generation;
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
struct btrfs_zoned_device_info *zone_info;
- /* block device holder for blkdev_get/put */
- void *holder;
-
/*
* Device's major-minor number. Must be set even if the device is not
* opened (bdev == NULL), unless the device is missing.
@@ -290,6 +288,19 @@ struct btrfs_fs_devices {
* - Following shall be true at all times:
* - metadata_uuid == btrfs_header::fsid
* - metadata_uuid == btrfs_dev_item::fsid
+ *
+ * - Relations between fsid and metadata_uuid in sb and fs_devices:
+ * - Normal:
+ * fs_devices->fsid == fs_devices->metadata_uuid == sb->fsid
+ * sb->metadata_uuid == 0
+ *
+ * - When the BTRFS_FEATURE_INCOMPAT_METADATA_UUID flag is set:
+ * fs_devices->fsid == sb->fsid
+ * fs_devices->metadata_uuid == sb->metadata_uuid
+ *
+ * - When the in-memory fs_devices->temp_fsid is true:
+ * fs_devices->fsid == random
+ * fs_devices->metadata_uuid == sb->fsid
*/
u8 metadata_uuid[BTRFS_FSID_SIZE];
@@ -353,9 +364,10 @@ struct btrfs_fs_devices {
bool rotating;
/* Devices support TRIM/discard commands. */
bool discardable;
- bool fsid_change;
/* The filesystem is a seed filesystem. */
bool seeding;
+ /* The mount needs to use a randomly generated fsid. */
+ bool temp_fsid;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@@ -381,12 +393,12 @@ struct btrfs_fs_devices {
struct btrfs_io_stripe {
struct btrfs_device *dev;
- union {
- /* Block mapping */
- u64 physical;
- /* For the endio handler */
- struct btrfs_io_context *bioc;
- };
+ /* Block mapping. */
+ u64 physical;
+ u64 length;
+ bool is_scrub;
+ /* For the endio handler. */
+ struct btrfs_io_context *bioc;
};
struct btrfs_discard_stripe {
@@ -419,6 +431,11 @@ struct btrfs_io_context {
atomic_t error;
u16 max_errors;
+ u64 logical;
+ u64 size;
+ /* Raid stripe tree ordered entry. */
+ struct list_head rst_ordered_entry;
+
/*
* The total number of stripes, including the extra duplicated
* stripe for replace.
@@ -596,8 +613,7 @@ void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret,
- struct btrfs_io_stripe *smap, int *mirror_num_ret,
- int need_raid_map);
+ struct btrfs_io_stripe *smap, int *mirror_num_ret);
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
struct btrfs_io_stripe *smap, u64 logical,
u32 length, int mirror_num);
@@ -611,7 +627,8 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
blk_mode_t flags, void *holder);
-struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags);
+struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
+ bool mount_arg_dev);
int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);
@@ -629,7 +646,7 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
struct btrfs_dev_lookup_args *args,
- struct block_device **bdev, void **holder);
+ struct bdev_handle **bdev_handle);
void __exit btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 96828a13dd43..3cf236fb40a4 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
- btrfs_extend_item(path, size - old_data_len);
+ btrfs_extend_item(trans, path, size - old_data_len);
else if (size < old_data_len)
- btrfs_truncate_item(path, data_size, 1);
+ btrfs_truncate_item(trans, path, data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
- btrfs_extend_item(path, data_size);
+ btrfs_extend_item(trans, path, data_size);
}
ptr = btrfs_item_ptr(leaf, slot, char);
@@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
btrfs_set_dir_data_len(leaf, di, size);
data_ptr = ((unsigned long)(di + 1)) + name_len;
write_extent_buffer(leaf, value, data_ptr, size);
- btrfs_mark_buffer_dirty(leaf);
+ btrfs_mark_buffer_dirty(trans, leaf);
} else {
/*
* Insert, and we had space for the xattr, so path->slots[0] is
@@ -265,7 +265,7 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@@ -408,7 +408,7 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
if (!ret) {
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
- ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
+ ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
btrfs_abort_transaction(trans, ret);
}
@@ -442,7 +442,7 @@ static const struct xattr_handler btrfs_btrfs_xattr_handler = {
.set = btrfs_xattr_handler_set_prop,
};
-const struct xattr_handler *btrfs_xattr_handlers[] = {
+const struct xattr_handler * const btrfs_xattr_handlers[] = {
&btrfs_security_xattr_handler,
&btrfs_trusted_xattr_handler,
&btrfs_user_xattr_handler,
diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h
index 1cd3fc0a8f17..118118ca3e1d 100644
--- a/fs/btrfs/xattr.h
+++ b/fs/btrfs/xattr.h
@@ -8,7 +8,7 @@
#include <linux/xattr.h>
-extern const struct xattr_handler *btrfs_xattr_handlers[];
+extern const struct xattr_handler * const btrfs_xattr_handlers[];
int btrfs_getxattr(struct inode *inode, const char *name,
void *buffer, size_t size);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 09bc325d075d..3504ade30cb0 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1282,21 +1282,284 @@ out:
return ret;
}
+struct zone_info {
+ u64 physical;
+ u64 capacity;
+ u64 alloc_offset;
+};
+
+static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
+ struct zone_info *info, unsigned long *active,
+ struct map_lookup *map)
+{
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
+ struct btrfs_device *device = map->stripes[zone_idx].dev;
+ int dev_replace_is_ongoing = 0;
+ unsigned int nofs_flag;
+ struct blk_zone zone;
+ int ret;
+
+ info->physical = map->stripes[zone_idx].physical;
+
+ if (!device->bdev) {
+ info->alloc_offset = WP_MISSING_DEV;
+ return 0;
+ }
+
+ /* Consider a zone as active if we can allow any number of active zones. */
+ if (!device->zone_info->max_active_zones)
+ __set_bit(zone_idx, active);
+
+ if (!btrfs_dev_is_sequential(device, info->physical)) {
+ info->alloc_offset = WP_CONVENTIONAL;
+ return 0;
+ }
+
+ /* This zone will be used for allocation, so mark this zone non-empty. */
+ btrfs_dev_clear_zone_empty(device, info->physical);
+
+ down_read(&dev_replace->rwsem);
+ dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
+ btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
+ up_read(&dev_replace->rwsem);
+
+ /*
+ * The group is mapped to a sequential zone. Get the zone write pointer
+ * to determine the allocation offset within the zone.
+ */
+ WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
+ nofs_flag = memalloc_nofs_save();
+ ret = btrfs_get_dev_zone(device, info->physical, &zone);
+ memalloc_nofs_restore(nofs_flag);
+ if (ret) {
+ if (ret != -EIO && ret != -EOPNOTSUPP)
+ return ret;
+ info->alloc_offset = WP_MISSING_DEV;
+ return 0;
+ }
+
+ if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ btrfs_err_in_rcu(fs_info,
+ "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
+ zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
+ device->devid);
+ return -EIO;
+ }
+
+ info->capacity = (zone.capacity << SECTOR_SHIFT);
+
+ switch (zone.cond) {
+ case BLK_ZONE_COND_OFFLINE:
+ case BLK_ZONE_COND_READONLY:
+ btrfs_err(fs_info,
+ "zoned: offline/readonly zone %llu on device %s (devid %llu)",
+ (info->physical >> device->zone_info->zone_size_shift),
+ rcu_str_deref(device->name), device->devid);
+ info->alloc_offset = WP_MISSING_DEV;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ info->alloc_offset = 0;
+ break;
+ case BLK_ZONE_COND_FULL:
+ info->alloc_offset = info->capacity;
+ break;
+ default:
+ /* Partially used zone. */
+ info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
+ __set_bit(zone_idx, active);
+ break;
+ }
+
+ return 0;
+}
+
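
For the partially used zone case, the allocation offset above is derived from the zone write pointer; a worked sketch with illustrative sector values:

/*
 * zone.start = 0x80000 sectors, zone.wp = 0x80800 sectors:
 *   alloc_offset = (0x80800 - 0x80000) << SECTOR_SHIFT
 *                = 0x800 * 512 = 1 MiB already written in this zone
 */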
+static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
+ struct zone_info *info,
+ unsigned long *active)
+{
+ if (info->alloc_offset == WP_MISSING_DEV) {
+ btrfs_err(bg->fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ info->physical);
+ return -EIO;
+ }
+
+ bg->alloc_offset = info->alloc_offset;
+ bg->zone_capacity = info->capacity;
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ return 0;
+}
+
+static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
+ struct map_lookup *map,
+ struct zone_info *zone_info,
+ unsigned long *active)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
+ return -EINVAL;
+ }
+
+ if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
+ btrfs_err(bg->fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ zone_info[0].physical);
+ return -EIO;
+ }
+ if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
+ btrfs_err(bg->fs_info,
+ "zoned: cannot recover write pointer for zone %llu",
+ zone_info[1].physical);
+ return -EIO;
+ }
+ if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
+ btrfs_err(bg->fs_info,
+ "zoned: write pointer offset mismatch of zones in DUP profile");
+ return -EIO;
+ }
+
+ if (test_bit(0, active) != test_bit(1, active)) {
+ if (!btrfs_zone_activate(bg))
+ return -EIO;
+ } else if (test_bit(0, active)) {
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+
+ bg->alloc_offset = zone_info[0].alloc_offset;
+ bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
+ return 0;
+}
+
+static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
+ struct map_lookup *map,
+ struct zone_info *zone_info,
+ unsigned long *active)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ int i;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
+ zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ continue;
+
+ if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
+ !btrfs_test_opt(fs_info, DEGRADED)) {
+ btrfs_err(fs_info,
+ "zoned: write pointer offset mismatch of zones in %s profile",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EIO;
+ }
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (!btrfs_test_opt(fs_info, DEGRADED) &&
+ !btrfs_zone_activate(bg)) {
+ return -EIO;
+ }
+ } else {
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+ /* In case a device is missing, we have a cap of 0, so don't use it. */
+ bg->zone_capacity = min_not_zero(zone_info[0].capacity,
+ zone_info[1].capacity);
+ }
+
+ if (zone_info[0].alloc_offset != WP_MISSING_DEV)
+ bg->alloc_offset = zone_info[0].alloc_offset;
+ else
+ bg->alloc_offset = zone_info[i - 1].alloc_offset;
+
+ return 0;
+}
+
+static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
+ struct map_lookup *map,
+ struct zone_info *zone_info,
+ unsigned long *active)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
+ for (int i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
+ zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ continue;
+
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (!btrfs_zone_activate(bg))
+ return -EIO;
+ } else {
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+ bg->zone_capacity += zone_info[i].capacity;
+ bg->alloc_offset += zone_info[i].alloc_offset;
+ }
+
+ return 0;
+}
+
+static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
+ struct map_lookup *map,
+ struct zone_info *zone_info,
+ unsigned long *active)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
+ for (int i = 0; i < map->num_stripes; i++) {
+ if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
+ zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ continue;
+
+ if (test_bit(0, active) != test_bit(i, active)) {
+ if (!btrfs_zone_activate(bg))
+ return -EIO;
+ } else {
+ if (test_bit(0, active))
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
+ }
+
+ if ((i % map->sub_stripes) == 0) {
+ bg->zone_capacity += zone_info[i].capacity;
+ bg->alloc_offset += zone_info[i].alloc_offset;
+ }
+ }
+
+ return 0;
+}
+
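
For the striped profiles above, capacity and allocation offset are simple sums; an illustrative sketch:

/*
 * RAID0, two stripes with capacity Z and write pointers w0, w1:
 *   bg->zone_capacity = Z + Z
 *   bg->alloc_offset  = w0 + w1
 * RAID10 (sub_stripes == 2) counts only one zone per mirror pair,
 * hence the (i % map->sub_stripes) == 0 check.
 */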
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct extent_map_tree *em_tree = &fs_info->mapping_tree;
struct extent_map *em;
struct map_lookup *map;
- struct btrfs_device *device;
u64 logical = cache->start;
u64 length = cache->length;
+ struct zone_info *zone_info = NULL;
int ret;
int i;
- unsigned int nofs_flag;
- u64 *alloc_offsets = NULL;
- u64 *caps = NULL;
- u64 *physical = NULL;
unsigned long *active = NULL;
u64 last_alloc = 0;
u32 num_sequential = 0, num_conventional = 0;
@@ -1328,20 +1591,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
}
- alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
- if (!alloc_offsets) {
- ret = -ENOMEM;
- goto out;
- }
-
- caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
- if (!caps) {
- ret = -ENOMEM;
- goto out;
- }
-
- physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
- if (!physical) {
+ zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
+ if (!zone_info) {
ret = -ENOMEM;
goto out;
}
@@ -1353,98 +1604,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
for (i = 0; i < map->num_stripes; i++) {
- bool is_sequential;
- struct blk_zone zone;
- struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- int dev_replace_is_ongoing = 0;
-
- device = map->stripes[i].dev;
- physical[i] = map->stripes[i].physical;
-
- if (device->bdev == NULL) {
- alloc_offsets[i] = WP_MISSING_DEV;
- continue;
- }
-
- is_sequential = btrfs_dev_is_sequential(device, physical[i]);
- if (is_sequential)
- num_sequential++;
- else
- num_conventional++;
-
- /*
- * Consider a zone as active if we can allow any number of
- * active zones.
- */
- if (!device->zone_info->max_active_zones)
- __set_bit(i, active);
-
- if (!is_sequential) {
- alloc_offsets[i] = WP_CONVENTIONAL;
- continue;
- }
-
- /*
- * This zone will be used for allocation, so mark this zone
- * non-empty.
- */
- btrfs_dev_clear_zone_empty(device, physical[i]);
-
- down_read(&dev_replace->rwsem);
- dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
- if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
- btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
- up_read(&dev_replace->rwsem);
-
- /*
- * The group is mapped to a sequential zone. Get the zone write
- * pointer to determine the allocation offset within the zone.
- */
- WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
- nofs_flag = memalloc_nofs_save();
- ret = btrfs_get_dev_zone(device, physical[i], &zone);
- memalloc_nofs_restore(nofs_flag);
- if (ret == -EIO || ret == -EOPNOTSUPP) {
- ret = 0;
- alloc_offsets[i] = WP_MISSING_DEV;
- continue;
- } else if (ret) {
- goto out;
- }
-
- if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
- btrfs_err_in_rcu(fs_info,
- "zoned: unexpected conventional zone %llu on device %s (devid %llu)",
- zone.start << SECTOR_SHIFT,
- rcu_str_deref(device->name), device->devid);
- ret = -EIO;
+ ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
+ if (ret)
goto out;
- }
- caps[i] = (zone.capacity << SECTOR_SHIFT);
-
- switch (zone.cond) {
- case BLK_ZONE_COND_OFFLINE:
- case BLK_ZONE_COND_READONLY:
- btrfs_err(fs_info,
- "zoned: offline/readonly zone %llu on device %s (devid %llu)",
- physical[i] >> device->zone_info->zone_size_shift,
- rcu_str_deref(device->name), device->devid);
- alloc_offsets[i] = WP_MISSING_DEV;
- break;
- case BLK_ZONE_COND_EMPTY:
- alloc_offsets[i] = 0;
- break;
- case BLK_ZONE_COND_FULL:
- alloc_offsets[i] = caps[i];
- break;
- default:
- /* Partially used zone */
- alloc_offsets[i] =
- ((zone.wp - zone.start) << SECTOR_SHIFT);
- __set_bit(i, active);
- break;
- }
+ if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
+ num_conventional++;
+ else
+ num_sequential++;
}
if (num_sequential > 0)
@@ -1468,63 +1635,24 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
case 0: /* single */
- if (alloc_offsets[0] == WP_MISSING_DEV) {
- btrfs_err(fs_info,
- "zoned: cannot recover write pointer for zone %llu",
- physical[0]);
- ret = -EIO;
- goto out;
- }
- cache->alloc_offset = alloc_offsets[0];
- cache->zone_capacity = caps[0];
- if (test_bit(0, active))
- set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
+ ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
break;
case BTRFS_BLOCK_GROUP_DUP:
- if (map->type & BTRFS_BLOCK_GROUP_DATA) {
- btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
- ret = -EINVAL;
- goto out;
- }
- if (alloc_offsets[0] == WP_MISSING_DEV) {
- btrfs_err(fs_info,
- "zoned: cannot recover write pointer for zone %llu",
- physical[0]);
- ret = -EIO;
- goto out;
- }
- if (alloc_offsets[1] == WP_MISSING_DEV) {
- btrfs_err(fs_info,
- "zoned: cannot recover write pointer for zone %llu",
- physical[1]);
- ret = -EIO;
- goto out;
- }
- if (alloc_offsets[0] != alloc_offsets[1]) {
- btrfs_err(fs_info,
- "zoned: write pointer offset mismatch of zones in DUP profile");
- ret = -EIO;
- goto out;
- }
- if (test_bit(0, active) != test_bit(1, active)) {
- if (!btrfs_zone_activate(cache)) {
- ret = -EIO;
- goto out;
- }
- } else {
- if (test_bit(0, active))
- set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
- &cache->runtime_flags);
- }
- cache->alloc_offset = alloc_offsets[0];
- cache->zone_capacity = min(caps[0], caps[1]);
+ ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
break;
case BTRFS_BLOCK_GROUP_RAID1:
+ case BTRFS_BLOCK_GROUP_RAID1C3:
+ case BTRFS_BLOCK_GROUP_RAID1C4:
+ ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
+ break;
case BTRFS_BLOCK_GROUP_RAID0:
+ ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
+ break;
case BTRFS_BLOCK_GROUP_RAID10:
+ ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
+ break;
case BTRFS_BLOCK_GROUP_RAID5:
case BTRFS_BLOCK_GROUP_RAID6:
- /* non-single profiles are not supported yet */
default:
btrfs_err(fs_info, "zoned: profile %s not yet supported",
btrfs_bg_type_to_raid_name(map->type));
@@ -1570,9 +1698,7 @@ out:
cache->physical_map = NULL;
}
bitmap_free(active);
- kfree(physical);
- kfree(caps);
- kfree(alloc_offsets);
+ kfree(zone_info);
free_extent_map(em);
return ret;
@@ -1609,7 +1735,7 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
set_extent_buffer_dirty(eb);
set_extent_bit(&trans->dirty_pages, eb->start, eb->start + eb->len - 1,
- EXTENT_DIRTY | EXTENT_NOWAIT, NULL);
+ EXTENT_DIRTY, NULL);
}
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
@@ -1887,7 +2013,7 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
int i, ret;
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
- &mapped_length, &bioc, NULL, NULL, 1);
+ &mapped_length, &bioc, NULL, NULL);
if (ret || !bioc || mapped_length < PAGE_SIZE) {
ret = -EIO;
goto out_put_bioc;
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index e7ac4ec809a4..5511766485cd 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -145,7 +145,7 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
}
/*
- * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
+ * Calculate monotonic memory bounds.
*
* It is possible based on the level configurations that a higher level
* workspace uses less memory than a lower level workspace. In order to reuse
@@ -218,7 +218,8 @@ void zstd_cleanup_workspace_manager(void)
}
/*
- * zstd_find_workspace - find workspace
+ * Find workspace for given level.
+ *
* @level: compression level
*
* This iterates over the set bits in the active_map beginning at the requested
@@ -256,7 +257,8 @@ static struct list_head *zstd_find_workspace(unsigned int level)
}
/*
- * zstd_get_workspace - zstd's get_workspace
+ * Zstd get_workspace for level.
+ *
* @level: compression level
*
* If @level is 0, then any compression level can be used. Therefore, we begin
@@ -296,7 +298,8 @@ again:
}
/*
- * zstd_put_workspace - zstd put_workspace
+ * Zstd put_workspace.
+ *
* @ws: list_head for the workspace
*
* When putting back a workspace, we only need to update the LRU if we are of
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f4863078f7fe..936b9e0b351d 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -750,7 +750,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(osdc, req);
err = ceph_osdc_wait_request(osdc, req);
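
This and the following ceph hunks apply one mechanical conversion from direct timespec64 fields to accessor helpers; a before/after sketch using only names visible in this diff:

	/* Before: direct field access. */
	inode->i_mtime = ts;
	ts = inode->i_atime;

	/* After: accessor helpers. */
	inode_set_mtime_to_ts(inode, ts);
	ts = inode_get_atime(inode);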
@@ -1327,7 +1327,7 @@ new_request:
pages = NULL;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
req = NULL;
@@ -1875,7 +1875,7 @@ int ceph_uninline_data(struct file *file)
goto out_unlock;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
ceph_osdc_put_request(req);
@@ -1917,7 +1917,7 @@ int ceph_uninline_data(struct file *file)
goto out_put_req;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -2092,7 +2092,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
0, false, true);
ceph_osdc_start_request(&fsc->client->osdc, rd_req);
- wr_req->r_mtime = ci->netfs.inode.i_mtime;
+ wr_req->r_mtime = inode_get_mtime(&ci->netfs.inode);
ceph_osdc_start_request(&fsc->client->osdc, wr_req);
err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 14215ec646f7..a104669fcf4c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1421,8 +1421,8 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
arg->old_xattr_buf = NULL;
}
- arg->mtime = inode->i_mtime;
- arg->atime = inode->i_atime;
+ arg->mtime = inode_get_mtime(inode);
+ arg->atime = inode_get_atime(inode);
arg->ctime = inode_get_ctime(inode);
arg->btime = ci->i_btime;
arg->change_attr = inode_peek_iversion_raw(inode);
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index 5b5112c78462..e3b1c3fab412 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -133,6 +133,7 @@ static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
}
static struct fscrypt_operations ceph_fscrypt_ops = {
+ .needs_bounce_pages = 1,
.get_context = ceph_crypt_get_context,
.set_context = ceph_crypt_set_context,
.get_dummy_policy = ceph_get_dummy_policy,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b5f8038065d7..649600d0a7b6 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2489,7 +2489,7 @@ static int ceph_zero_partial_object(struct inode *inode,
goto out;
}
- req->r_mtime = inode->i_mtime;
+ req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(&fsc->client->osdc, req);
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
if (ret == -ENOENT)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index b79100f720b3..2e2a303b9e64 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -185,9 +185,9 @@ struct inode *ceph_get_snapdir(struct inode *parent)
inode->i_mode = parent->i_mode;
inode->i_uid = parent->i_uid;
inode->i_gid = parent->i_gid;
- inode->i_mtime = parent->i_mtime;
+ inode_set_mtime_to_ts(inode, inode_get_mtime(parent));
inode_set_ctime_to_ts(inode, inode_get_ctime(parent));
- inode->i_atime = parent->i_atime;
+ inode_set_atime_to_ts(inode, inode_get_atime(parent));
ci->i_rbytes = 0;
ci->i_btime = ceph_inode(parent)->i_btime;
@@ -835,28 +835,31 @@ void ceph_fill_file_time(struct inode *inode, int issued,
/* the MDS did a utimes() */
dout("mtime %lld.%09ld -> %lld.%09ld "
"tw %d -> %d\n",
- inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ inode_get_mtime_sec(inode),
+ inode_get_mtime_nsec(inode),
mtime->tv_sec, mtime->tv_nsec,
ci->i_time_warp_seq, (int)time_warp_seq);
- inode->i_mtime = *mtime;
- inode->i_atime = *atime;
+ inode_set_mtime_to_ts(inode, *mtime);
+ inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else if (time_warp_seq == ci->i_time_warp_seq) {
+ struct timespec64 ts;
+
/* nobody did utimes(); take the max */
- if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
+ ts = inode_get_mtime(inode);
+ if (timespec64_compare(mtime, &ts) > 0) {
dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
- inode->i_mtime.tv_sec,
- inode->i_mtime.tv_nsec,
+ ts.tv_sec, ts.tv_nsec,
mtime->tv_sec, mtime->tv_nsec);
- inode->i_mtime = *mtime;
+ inode_set_mtime_to_ts(inode, *mtime);
}
- if (timespec64_compare(atime, &inode->i_atime) > 0) {
+ ts = inode_get_atime(inode);
+ if (timespec64_compare(atime, &ts) > 0) {
dout("atime %lld.%09ld -> %lld.%09ld inc\n",
- inode->i_atime.tv_sec,
- inode->i_atime.tv_nsec,
+ ts.tv_sec, ts.tv_nsec,
atime->tv_sec, atime->tv_nsec);
- inode->i_atime = *atime;
+ inode_set_atime_to_ts(inode, *atime);
}
} else if (issued & CEPH_CAP_FILE_EXCL) {
/* we did a utimes(); ignore mds values */
@@ -867,8 +870,8 @@ void ceph_fill_file_time(struct inode *inode, int issued,
/* we have no write|excl caps; whatever the MDS says is true */
if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
inode_set_ctime_to_ts(inode, *ctime);
- inode->i_mtime = *mtime;
- inode->i_atime = *atime;
+ inode_set_mtime_to_ts(inode, *mtime);
+ inode_set_atime_to_ts(inode, *atime);
ci->i_time_warp_seq = time_warp_seq;
} else {
warn = 1;
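A sketch of the "nobody did utimes(); take the max" branch above under the new accessors: copy the timestamp out, compare, and only write back if the MDS value is newer. The timespec64_compare() semantics are modeled here in plain C.

#include <stdio.h>

struct ts64 { long long tv_sec; long tv_nsec; };

static int ts_compare(const struct ts64 *a, const struct ts64 *b)
{
	if (a->tv_sec != b->tv_sec)
		return a->tv_sec < b->tv_sec ? -1 : 1;
	return (a->tv_nsec > b->tv_nsec) - (a->tv_nsec < b->tv_nsec);
}

int main(void)
{
	struct ts64 cur = { 100, 0 };		/* inode_get_mtime(inode) */
	struct ts64 mds = { 100, 500 };		/* *mtime from the MDS */

	if (ts_compare(&mds, &cur) > 0)
		cur = mds;			/* inode_set_mtime_to_ts() */
	printf("%lld.%09ld\n", cur.tv_sec, cur.tv_nsec);
	return 0;
}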
@@ -2551,20 +2554,22 @@ retry:
}
if (ia_valid & ATTR_ATIME) {
+ struct timespec64 atime = inode_get_atime(inode);
+
dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
- inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
+ atime.tv_sec, atime.tv_nsec,
attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
dirtied |= CEPH_CAP_FILE_EXCL;
} else if ((issued & CEPH_CAP_FILE_WR) &&
- timespec64_compare(&inode->i_atime,
- &attr->ia_atime) < 0) {
- inode->i_atime = attr->ia_atime;
+ timespec64_compare(&atime,
+ &attr->ia_atime) < 0) {
+ inode_set_atime_to_ts(inode, attr->ia_atime);
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
+ !timespec64_equal(&atime, &attr->ia_atime)) {
ceph_encode_timespec64(&req->r_args.setattr.atime,
&attr->ia_atime);
mask |= CEPH_SETATTR_ATIME;
@@ -2624,20 +2629,21 @@ retry:
}
}
if (ia_valid & ATTR_MTIME) {
+ struct timespec64 mtime = inode_get_mtime(inode);
+
dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
- inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ mtime.tv_sec, mtime.tv_nsec,
attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
if (issued & CEPH_CAP_FILE_EXCL) {
ci->i_time_warp_seq++;
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
dirtied |= CEPH_CAP_FILE_EXCL;
} else if ((issued & CEPH_CAP_FILE_WR) &&
- timespec64_compare(&inode->i_mtime,
- &attr->ia_mtime) < 0) {
- inode->i_mtime = attr->ia_mtime;
+ timespec64_compare(&mtime, &attr->ia_mtime) < 0) {
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
dirtied |= CEPH_CAP_FILE_WR;
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
- !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
+ !timespec64_equal(&mtime, &attr->ia_mtime)) {
ceph_encode_timespec64(&req->r_args.setattr.mtime,
&attr->ia_mtime);
mask |= CEPH_SETATTR_MTIME;
@@ -2651,8 +2657,8 @@ retry:
bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
- inode_get_ctime(inode).tv_sec,
- inode_get_ctime(inode).tv_nsec,
+ inode_get_ctime_sec(inode),
+ inode_get_ctime_nsec(inode),
attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
only ? "ctime only" : "ignored");
if (only) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 615db141b6c4..de798444bb97 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -861,8 +861,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
if (!d_same_name(udentry, pdentry, &dname))
goto next;
+ found = dget_dlock(udentry);
spin_unlock(&udentry->d_lock);
- found = dget(udentry);
break;
next:
spin_unlock(&udentry->d_lock);
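A hedged sketch of the locking pattern in the hunk above: take the reference while still holding the per-object lock (as dget_dlock() does under d_lock), then unlock, rather than unlocking first and hoping the object survives the gap. Illustrative only, modeled with a pthread mutex and a plain refcount.

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int refcount;
};

static struct obj *obj_get_locked(struct obj *o)
{
	/* caller holds o->lock, so this bump cannot race with teardown */
	o->refcount++;
	return o;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct obj *found;

	pthread_mutex_lock(&o.lock);
	found = obj_get_locked(&o);	/* analogous to dget_dlock() */
	pthread_mutex_unlock(&o.lock);	/* only now drop the lock */

	printf("refcount = %d\n", found->refcount);	/* 2 */
	return 0;
}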
@@ -4353,12 +4353,16 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
rec.v2.flock_len = (__force __le32)
((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
} else {
+ struct timespec64 ts;
+
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v1.issued = cpu_to_le32(cap->issued);
rec.v1.size = cpu_to_le64(i_size_read(inode));
- ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
- ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
+ ts = inode_get_mtime(inode);
+ ceph_encode_timespec64(&rec.v1.mtime, &ts);
+ ts = inode_get_atime(inode);
+ ceph_encode_timespec64(&rec.v1.atime, &ts);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
}
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 813f21add992..6732e1ea97d9 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -658,8 +658,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
BUG_ON(capsnap->writing);
capsnap->size = i_size_read(inode);
- capsnap->mtime = inode->i_mtime;
- capsnap->atime = inode->i_atime;
+ capsnap->mtime = inode_get_mtime(inode);
+ capsnap->atime = inode_get_atime(inode);
capsnap->ctime = inode_get_ctime(inode);
capsnap->btime = ci->i_btime;
capsnap->change_attr = inode_peek_iversion_raw(inode);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 51c7f2b14f6f..98844fc8a2f7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1119,7 +1119,7 @@ ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
-extern const struct xattr_handler *ceph_xattr_handlers[];
+extern const struct xattr_handler * const ceph_xattr_handlers[];
struct ceph_acl_sec_ctx {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 0deae4a0f5f1..097ce7f74073 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1446,7 +1446,7 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
* List of handlers for synthetic system.* attributes. Other
* attributes are handled directly.
*/
-const struct xattr_handler *ceph_xattr_handlers[] = {
+const struct xattr_handler * const ceph_xattr_handlers[] = {
&ceph_other_xattr_handler,
NULL,
};
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 950b6919fb87..6ba032442b39 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -350,7 +350,7 @@ static struct kobject *cdev_get(struct cdev *p)
struct module *owner = p->owner;
struct kobject *kobj;
- if (owner && !try_module_get(owner))
+ if (!try_module_get(owner))
return NULL;
kobj = kobject_get_unless_zero(&p->kobj);
if (!kobj)
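A hedged sketch of why the "owner &&" test above is redundant: the kernel's try_module_get() treats a NULL module pointer as built-in code and succeeds, so the explicit NULL check adds nothing. The model below is a loose approximation in plain C.

#include <stdbool.h>
#include <stdio.h>

enum module_state { LIVE, GOING };

struct module { enum module_state state; int refcount; };

static bool try_module_get_model(struct module *m)
{
	if (!m)
		return true;	/* NULL owner: built-in, nothing to pin */
	if (m->state == GOING)
		return false;	/* the real helper refuses dying modules */
	m->refcount++;
	return true;
}

int main(void)
{
	struct module m = { LIVE, 0 };

	printf("NULL owner: %d\n", try_module_get_model(NULL));	/* 1 */
	printf("live owner: %d\n", try_module_get_model(&m));		/* 1 */
	return 0;
}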
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index ae023853a98f..1d2dac95f86a 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -123,9 +123,11 @@ void coda_vattr_to_iattr(struct inode *inode, struct coda_vattr *attr)
if (attr->va_size != -1)
inode->i_blocks = (attr->va_size + 511) >> 9;
if (attr->va_atime.tv_sec != -1)
- inode->i_atime = coda_to_timespec64(attr->va_atime);
+ inode_set_atime_to_ts(inode,
+ coda_to_timespec64(attr->va_atime));
if (attr->va_mtime.tv_sec != -1)
- inode->i_mtime = coda_to_timespec64(attr->va_mtime);
+ inode_set_mtime_to_ts(inode,
+ coda_to_timespec64(attr->va_mtime));
if (attr->va_ctime.tv_sec != -1)
inode_set_ctime_to_ts(inode,
coda_to_timespec64(attr->va_ctime));
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index cb512b10473b..4e552ba7bd43 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -111,7 +111,7 @@ static inline void coda_dir_update_mtime(struct inode *dir)
/* optimistically we can also act as if our nose bleeds. The
* granularity of the mtime is coarse anyways so we might actually be
* right most of the time. Note: we only do this for directories. */
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
#endif
}
diff --git a/fs/coda/file.c b/fs/coda/file.c
index 42346618b4ed..16acc58311ea 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -84,7 +84,7 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
ret = vfs_iter_write(cfi->cfi_container, to, &iocb->ki_pos, 0);
coda_inode->i_size = file_inode(host_file)->i_size;
coda_inode->i_blocks = (coda_inode->i_size + 511) >> 9;
- coda_inode->i_mtime = inode_set_ctime_current(coda_inode);
+ inode_set_mtime_to_ts(coda_inode, inode_set_ctime_current(coda_inode));
inode_unlock(coda_inode);
file_end_write(host_file);
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index fbdcb3582926..dcc22f593e43 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -88,7 +88,7 @@ int configfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
@@ -96,8 +96,8 @@ static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
inode->i_mode = iattr->ia_mode;
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
- inode->i_atime = iattr->ia_atime;
- inode->i_mtime = iattr->ia_mtime;
+ inode_set_atime_to_ts(inode, iattr->ia_atime);
+ inode_set_mtime_to_ts(inode, iattr->ia_mtime);
inode_set_ctime_to_ts(inode, iattr->ia_ctime);
}
@@ -171,7 +171,7 @@ struct inode *configfs_create(struct dentry *dentry, umode_t mode)
return ERR_PTR(-ENOMEM);
p_inode = d_inode(dentry->d_parent);
- p_inode->i_mtime = inode_set_ctime_current(p_inode);
+ inode_set_mtime_to_ts(p_inode, inode_set_ctime_current(p_inode));
configfs_set_inode_lock_class(sd, inode);
return inode;
}
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 5ee7d7bbb361..60dbfa0f8805 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -133,8 +133,8 @@ static struct inode *get_cramfs_inode(struct super_block *sb,
}
/* Struct copy intentional */
- inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
- zerotime);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, zerotime)));
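A minimal sketch of why the chained call above works: the timestamp setters return the timespec they stored, which lets cramfs set ctime, atime, and mtime to the same zerotime in a single expression. Modeled in plain C.

#include <stdio.h>
#include <time.h>

struct fake_inode { struct timespec a, m, c; };

static struct timespec set_ctime(struct fake_inode *i, struct timespec ts)
{
	return i->c = ts;	/* assignment yields the stored value */
}

static struct timespec set_atime(struct fake_inode *i, struct timespec ts)
{
	return i->a = ts;
}

static struct timespec set_mtime(struct fake_inode *i, struct timespec ts)
{
	return i->m = ts;
}

int main(void)
{
	struct fake_inode inode;
	struct timespec zerotime = { 0, 0 };

	set_mtime(&inode, set_atime(&inode, set_ctime(&inode, zerotime)));
	printf("%lld %lld %lld\n", (long long)inode.a.tv_sec,
	       (long long)inode.m.tv_sec, (long long)inode.c.tv_sec);
	return 0;
}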
/* inode->i_nlink is left 1 - arguably wrong for directories,
but it's the best we can do without reading the directory
contents. 1 yields the right result in GNU find, even
@@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb)
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- blkdev_put(sb->s_bdev, sb);
+ bdev_release(sb->s_bdev_handle);
}
kfree(sbi);
}
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 62e1a3dd8357..0ad8c30b8fa5 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -111,10 +111,14 @@ out:
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
- const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
- const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
+ const unsigned int du_per_page_bits = PAGE_SHIFT - du_bits;
+ const unsigned int du_per_page = 1U << du_per_page_bits;
+ u64 du_index = (u64)lblk << (inode->i_blkbits - du_bits);
+ u64 du_remaining = (u64)len << (inode->i_blkbits - du_bits);
+ sector_t sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);
struct page *pages[16]; /* write up to 16 pages at a time */
unsigned int nr_pages;
unsigned int i;
@@ -130,8 +134,8 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
len);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
- nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
- (len + blocks_per_page - 1) >> blocks_per_page_bits);
+ nr_pages = min_t(u64, ARRAY_SIZE(pages),
+ (du_remaining + du_per_page - 1) >> du_per_page_bits);
/*
* We need at least one page for ciphertext. Allocate the first one
@@ -154,21 +158,22 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS);
do {
- bio->bi_iter.bi_sector = pblk << (blockbits - 9);
+ bio->bi_iter.bi_sector = sector;
i = 0;
offset = 0;
do {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
- ZERO_PAGE(0), pages[i],
- blocksize, offset, GFP_NOFS);
+ err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, du_index,
+ ZERO_PAGE(0), pages[i],
+ du_size, offset,
+ GFP_NOFS);
if (err)
goto out;
- lblk++;
- pblk++;
- len--;
- offset += blocksize;
- if (offset == PAGE_SIZE || len == 0) {
+ du_index++;
+ sector += 1U << (du_bits - SECTOR_SHIFT);
+ du_remaining--;
+ offset += du_size;
+ if (offset == PAGE_SIZE || du_remaining == 0) {
ret = bio_add_page(bio, pages[i++], offset, 0);
if (WARN_ON_ONCE(ret != offset)) {
err = -EIO;
@@ -176,13 +181,13 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
}
offset = 0;
}
- } while (i != nr_pages && len != 0);
+ } while (i != nr_pages && du_remaining != 0);
err = submit_bio_wait(bio);
if (err)
goto out;
bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE);
- } while (len != 0);
+ } while (du_remaining != 0);
err = 0;
out:
bio_put(bio);
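A sketch of the unit conversions introduced above: a filesystem block range (lblk/pblk/len, in units of 1 << i_blkbits) is rescaled to crypto data units and 512-byte sectors before the bio loop runs. The block and data unit sizes below are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned int blkbits = 12;	/* 4 KiB filesystem blocks */
	unsigned int du_bits = 10;	/* hypothetical 1 KiB data units */
	uint64_t lblk = 5, pblk = 100, len = 3;	/* 3 fs blocks */

	uint64_t du_index = lblk << (blkbits - du_bits);
	uint64_t du_remaining = len << (blkbits - du_bits);
	uint64_t sector = pblk << (blkbits - SECTOR_SHIFT);

	printf("du_index=%llu du_remaining=%llu sector=%llu\n",
	       (unsigned long long)du_index,
	       (unsigned long long)du_remaining,
	       (unsigned long long)sector);
	/* 5 * 4 = 20, 3 * 4 = 12, 100 * 8 = 800 */
	return 0;
}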
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 6a837e4b80dc..328470d40dec 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -39,7 +39,7 @@ static mempool_t *fscrypt_bounce_page_pool = NULL;
static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
-struct kmem_cache *fscrypt_info_cachep;
+struct kmem_cache *fscrypt_inode_info_cachep;
void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
@@ -49,6 +49,13 @@ EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
+ if (WARN_ON_ONCE(!fscrypt_bounce_page_pool)) {
+ /*
+ * Oops, the filesystem called a function that uses the bounce
+ * page pool, but it didn't set needs_bounce_pages.
+ */
+ return NULL;
+ }
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
@@ -70,44 +77,44 @@ void fscrypt_free_bounce_page(struct page *bounce_page)
EXPORT_SYMBOL(fscrypt_free_bounce_page);
/*
- * Generate the IV for the given logical block number within the given file.
- * For filenames encryption, lblk_num == 0.
+ * Generate the IV for the given data unit index within the given file.
+ * For filenames encryption, index == 0.
*
* Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks()
* needs to know about any IV generation methods where the low bits of IV don't
- * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
+ * simply contain the data unit index (e.g., IV_INO_LBLK_32).
*/
-void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
- const struct fscrypt_info *ci)
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
+ const struct fscrypt_inode_info *ci)
{
u8 flags = fscrypt_policy_flags(&ci->ci_policy);
memset(iv, 0, ci->ci_mode->ivsize);
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) {
- WARN_ON_ONCE(lblk_num > U32_MAX);
+ WARN_ON_ONCE(index > U32_MAX);
WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX);
- lblk_num |= (u64)ci->ci_inode->i_ino << 32;
+ index |= (u64)ci->ci_inode->i_ino << 32;
} else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) {
- WARN_ON_ONCE(lblk_num > U32_MAX);
- lblk_num = (u32)(ci->ci_hashed_ino + lblk_num);
+ WARN_ON_ONCE(index > U32_MAX);
+ index = (u32)(ci->ci_hashed_ino + index);
} else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
}
- iv->lblk_num = cpu_to_le64(lblk_num);
+ iv->index = cpu_to_le64(index);
}
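/*
 * A hedged worked example of the IV_INO_LBLK_64 branch above: the 64-bit
 * IV index field carries the 32-bit inode number in its high half and the
 * 32-bit data unit index in its low half. Self-contained userspace model.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ino = 0x1234;		/* inode number (must be <= U32_MAX) */
	uint64_t index = 7;		/* data unit index (<= U32_MAX) */

	index |= ino << 32;		/* as in the IV_INO_LBLK_64 case */
	printf("iv->index = 0x%016llx\n", (unsigned long long)index);
	/* prints 0x0000123400000007 */
	return 0;
}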
-/* Encrypt or decrypt a single filesystem block of file contents */
-int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags)
+/* Encrypt or decrypt a single "data unit" of file contents. */
+int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
+ fscrypt_direction_t rw, u64 index,
+ struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs,
+ gfp_t gfp_flags)
{
union fscrypt_iv iv;
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist dst, src;
- struct fscrypt_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
int res = 0;
@@ -116,7 +123,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0))
return -EINVAL;
- fscrypt_generate_iv(&iv, lblk_num, ci);
+ fscrypt_generate_iv(&iv, index, ci);
req = skcipher_request_alloc(tfm, gfp_flags);
if (!req)
@@ -137,28 +144,29 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
skcipher_request_free(req);
if (res) {
- fscrypt_err(inode, "%scryption failed for block %llu: %d",
- (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);
+ fscrypt_err(ci->ci_inode,
+ "%scryption failed for data unit %llu: %d",
+ (rw == FS_DECRYPT ? "De" : "En"), index, res);
return res;
}
return 0;
}
/**
- * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a
- * pagecache page
- * @page: The locked pagecache page containing the block(s) to encrypt
- * @len: Total size of the block(s) to encrypt. Must be a nonzero
- * multiple of the filesystem's block size.
- * @offs: Byte offset within @page of the first block to encrypt. Must be
- * a multiple of the filesystem's block size.
- * @gfp_flags: Memory allocation flags. See details below.
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt data from a pagecache page
+ * @page: the locked pagecache page containing the data to encrypt
+ * @len: size of the data to encrypt, in bytes
+ * @offs: offset within @page of the data to encrypt, in bytes
+ * @gfp_flags: memory allocation flags; see details below
+ *
+ * This allocates a new bounce page and encrypts the given data into it. The
+ * length and offset of the data must be aligned to the file's crypto data unit
+ * size. Alignment to the filesystem block size fulfills this requirement, as
+ * the filesystem block size is always a multiple of the data unit size.
*
- * A new bounce page is allocated, and the specified block(s) are encrypted into
- * it. In the bounce page, the ciphertext block(s) will be located at the same
- * offsets at which the plaintext block(s) were located in the source page; any
- * other parts of the bounce page will be left uninitialized. However, normally
- * blocksize == PAGE_SIZE and the whole page is encrypted at once.
+ * In the bounce page, the ciphertext data will be located at the same offset at
+ * which the plaintext data was located in the source page. Any other parts of
+ * the bounce page will be left uninitialized.
*
* This is for use by the filesystem's ->writepages() method.
*
@@ -176,28 +184,29 @@ struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
{
const struct inode *inode = page->mapping->host;
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
struct page *ciphertext_page;
- u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
+ u64 index = ((u64)page->index << (PAGE_SHIFT - du_bits)) +
+ (offs >> du_bits);
unsigned int i;
int err;
if (WARN_ON_ONCE(!PageLocked(page)))
return ERR_PTR(-EINVAL);
- if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+ if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return ERR_PTR(-EINVAL);
ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
if (!ciphertext_page)
return ERR_PTR(-ENOMEM);
- for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
- err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
- page, ciphertext_page,
- blocksize, i, gfp_flags);
+ for (i = offs; i < offs + len; i += du_size, index++) {
+ err = fscrypt_crypt_data_unit(ci, FS_ENCRYPT, index,
+ page, ciphertext_page,
+ du_size, i, gfp_flags);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
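A sketch of the starting-index computation in the loop above: the first data unit index inside a page is derived from the page index and the byte offset, both rescaled to data units. Sizes below are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int du_bits = 10;	/* hypothetical 1 KiB data units */
	uint64_t page_index = 3;
	unsigned int offs = 2048;	/* byte offset within the page */

	uint64_t index = (page_index << (PAGE_SHIFT - du_bits)) +
			 (offs >> du_bits);
	printf("first data unit index = %llu\n", (unsigned long long)index);
	/* 3 * 4 + 2 = 14 */
	return 0;
}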
@@ -224,30 +233,33 @@ EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
+ * This is not compatible with fscrypt_operations::supports_subblock_data_units.
+ *
* Return: 0 on success; -errno on failure
*/
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
{
- return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
- len, offs, gfp_flags);
+ if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
+ return -EOPNOTSUPP;
+ return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_ENCRYPT,
+ lblk_num, page, page, len, offs,
+ gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
/**
- * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a
- * pagecache folio
- * @folio: The locked pagecache folio containing the block(s) to decrypt
- * @len: Total size of the block(s) to decrypt. Must be a nonzero
- * multiple of the filesystem's block size.
- * @offs: Byte offset within @folio of the first block to decrypt. Must be
- * a multiple of the filesystem's block size.
+ * fscrypt_decrypt_pagecache_blocks() - Decrypt data from a pagecache folio
+ * @folio: the pagecache folio containing the data to decrypt
+ * @len: size of the data to decrypt, in bytes
+ * @offs: offset within @folio of the data to decrypt, in bytes
*
- * The specified block(s) are decrypted in-place within the pagecache folio,
- * which must still be locked and not uptodate.
- *
- * This is for use by the filesystem's ->readahead() method.
+ * Decrypt data that has just been read from an encrypted file. The data must
+ * be located in a pagecache folio that is still locked and not yet uptodate.
+ * The length and offset of the data must be aligned to the file's crypto data
+ * unit size. Alignment to the filesystem block size fulfills this requirement,
+ * as the filesystem block size is always a multiple of the data unit size.
*
* Return: 0 on success; -errno on failure
*/
@@ -255,25 +267,26 @@ int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len,
size_t offs)
{
const struct inode *inode = folio->mapping->host;
- const unsigned int blockbits = inode->i_blkbits;
- const unsigned int blocksize = 1 << blockbits;
- u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
+ const unsigned int du_bits = ci->ci_data_unit_bits;
+ const unsigned int du_size = 1U << du_bits;
+ u64 index = ((u64)folio->index << (PAGE_SHIFT - du_bits)) +
+ (offs >> du_bits);
size_t i;
int err;
if (WARN_ON_ONCE(!folio_test_locked(folio)))
return -EINVAL;
- if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+ if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, du_size)))
return -EINVAL;
- for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+ for (i = offs; i < offs + len; i += du_size, index++) {
struct page *page = folio_page(folio, i >> PAGE_SHIFT);
- err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
- page, blocksize, i & ~PAGE_MASK,
- GFP_NOFS);
+ err = fscrypt_crypt_data_unit(ci, FS_DECRYPT, index, page,
+ page, du_size, i & ~PAGE_MASK,
+ GFP_NOFS);
if (err)
return err;
}
@@ -295,14 +308,19 @@ EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
+ * This is not compatible with fscrypt_operations::supports_subblock_data_units.
+ *
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
- return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
- len, offs, GFP_NOFS);
+ if (WARN_ON_ONCE(inode->i_sb->s_cop->supports_subblock_data_units))
+ return -EOPNOTSUPP;
+ return fscrypt_crypt_data_unit(inode->i_crypt_info, FS_DECRYPT,
+ lblk_num, page, page, len, offs,
+ GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
@@ -325,7 +343,7 @@ int fscrypt_initialize(struct super_block *sb)
return 0;
/* No need to allocate a bounce page pool if this FS won't use it. */
- if (sb->s_cop->flags & FS_CFLG_OWN_PAGES)
+ if (!sb->s_cop->needs_bounce_pages)
return 0;
mutex_lock(&fscrypt_init_mutex);
@@ -391,18 +409,19 @@ static int __init fscrypt_init(void)
if (!fscrypt_read_workqueue)
goto fail;
- fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
- if (!fscrypt_info_cachep)
+ fscrypt_inode_info_cachep = KMEM_CACHE(fscrypt_inode_info,
+ SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_inode_info_cachep)
goto fail_free_queue;
err = fscrypt_init_keyring();
if (err)
- goto fail_free_info;
+ goto fail_free_inode_info;
return 0;
-fail_free_info:
- kmem_cache_destroy(fscrypt_info_cachep);
+fail_free_inode_info:
+ kmem_cache_destroy(fscrypt_inode_info_cachep);
fail_free_queue:
destroy_workqueue(fscrypt_read_workqueue);
fail:
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 6eae3f12ad50..7b3fc189593a 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -100,7 +100,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname,
{
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
- const struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
union fscrypt_iv iv;
struct scatterlist sg;
@@ -157,7 +157,7 @@ static int fname_decrypt(const struct inode *inode,
struct skcipher_request *req = NULL;
DECLARE_CRYPTO_WAIT(wait);
struct scatterlist src_sg, dst_sg;
- const struct fscrypt_info *ci = inode->i_crypt_info;
+ const struct fscrypt_inode_info *ci = inode->i_crypt_info;
struct crypto_skcipher *tfm = ci->ci_enc_key.tfm;
union fscrypt_iv iv;
int res;
@@ -568,7 +568,7 @@ EXPORT_SYMBOL_GPL(fscrypt_match_name);
*/
u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name)
{
- const struct fscrypt_info *ci = dir->i_crypt_info;
+ const struct fscrypt_inode_info *ci = dir->i_crypt_info;
WARN_ON_ONCE(!ci->ci_dirhash_key_initialized);
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 2d63da48635a..1892356cf924 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -47,7 +47,8 @@ struct fscrypt_context_v2 {
u8 contents_encryption_mode;
u8 filenames_encryption_mode;
u8 flags;
- u8 __reserved[4];
+ u8 log2_data_unit_size;
+ u8 __reserved[3];
u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
};
@@ -165,6 +166,26 @@ fscrypt_policy_flags(const union fscrypt_policy *policy)
BUG();
}
+static inline int
+fscrypt_policy_v2_du_bits(const struct fscrypt_policy_v2 *policy,
+ const struct inode *inode)
+{
+ return policy->log2_data_unit_size ?: inode->i_blkbits;
+}
+
+static inline int
+fscrypt_policy_du_bits(const union fscrypt_policy *policy,
+ const struct inode *inode)
+{
+ switch (policy->version) {
+ case FSCRYPT_POLICY_V1:
+ return inode->i_blkbits;
+ case FSCRYPT_POLICY_V2:
+ return fscrypt_policy_v2_du_bits(&policy->v2, inode);
+ }
+ BUG();
+}
+
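A minimal illustration of the fallback in the helpers above: a v2 policy with log2_data_unit_size == 0 keeps the historical behavior of one data unit per filesystem block, while a nonzero value selects smaller units.

#include <stdio.h>

static int du_bits(int log2_data_unit_size, int i_blkbits)
{
	return log2_data_unit_size ? log2_data_unit_size : i_blkbits;
}

int main(void)
{
	printf("%d\n", du_bits(0, 12));		/* 12: 4 KiB data units */
	printf("%d\n", du_bits(9, 12));		/* 9: 512-byte data units */
	return 0;
}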
/*
* For encrypted symlinks, the ciphertext length is stored at the beginning
* of the string in little-endian format.
@@ -189,18 +210,18 @@ struct fscrypt_prepared_key {
};
/*
- * fscrypt_info - the "encryption key" for an inode
+ * fscrypt_inode_info - the "encryption key" for an inode
*
* When an encrypted file's key is made available, an instance of this struct is
* allocated and stored in ->i_crypt_info. Once created, it remains until the
* inode is evicted.
*/
-struct fscrypt_info {
+struct fscrypt_inode_info {
/* The key in a form prepared for actual encryption/decryption */
struct fscrypt_prepared_key ci_enc_key;
- /* True if ci_enc_key should be freed when this fscrypt_info is freed */
+ /* True if ci_enc_key should be freed when this struct is freed */
bool ci_owns_key;
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
@@ -212,6 +233,16 @@ struct fscrypt_info {
#endif
/*
+ * log2 of the data unit size (granularity of contents encryption) of
+ * this file. This is computable from ci_policy and ci_inode but is
+ * cached here for efficiency. Only used for regular files.
+ */
+ u8 ci_data_unit_bits;
+
+ /* Cached value: log2 of number of data units per FS block */
+ u8 ci_data_units_per_block_bits;
+
+ /*
* Encryption mode used for this inode. It corresponds to either the
* contents or filenames encryption mode, depending on the inode type.
*/
@@ -263,12 +294,13 @@ typedef enum {
} fscrypt_direction_t;
/* crypto.c */
-extern struct kmem_cache *fscrypt_info_cachep;
+extern struct kmem_cache *fscrypt_inode_info_cachep;
int fscrypt_initialize(struct super_block *sb);
-int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
- u64 lblk_num, struct page *src_page,
- struct page *dest_page, unsigned int len,
- unsigned int offs, gfp_t gfp_flags);
+int fscrypt_crypt_data_unit(const struct fscrypt_inode_info *ci,
+ fscrypt_direction_t rw, u64 index,
+ struct page *src_page, struct page *dest_page,
+ unsigned int len, unsigned int offs,
+ gfp_t gfp_flags);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
void __printf(3, 4) __cold
@@ -283,8 +315,8 @@ fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...);
union fscrypt_iv {
struct {
- /* logical block number within the file */
- __le64 lblk_num;
+ /* zero-based index of data unit within the file */
+ __le64 index;
/* per-file nonce; only set in DIRECT_KEY mode */
u8 nonce[FSCRYPT_FILE_NONCE_SIZE];
@@ -293,8 +325,18 @@ union fscrypt_iv {
__le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)];
};
-void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
- const struct fscrypt_info *ci);
+void fscrypt_generate_iv(union fscrypt_iv *iv, u64 index,
+ const struct fscrypt_inode_info *ci);
+
+/*
+ * Return the number of bits used by the maximum file data unit index that is
+ * possible on the given filesystem, using the given log2 data unit size.
+ */
+static inline int
+fscrypt_max_file_dun_bits(const struct super_block *sb, int du_bits)
+{
+ return fls64(sb->s_maxbytes - 1) - du_bits;
+}
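A hedged worked example of fscrypt_max_file_dun_bits(): with a hypothetical s_maxbytes of 2^44 and 4 KiB data units, the largest possible data unit index fits in 44 - 12 = 32 bits, i.e. a 4-byte DUN. fls64() is modeled here with a GCC/Clang builtin.

#include <stdint.h>
#include <stdio.h>

static int fls64_model(uint64_t x)	/* models the kernel's fls64() */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t s_maxbytes = 1ULL << 44;	/* hypothetical fs limit */
	int du_bits = 12;
	int dun_bits = fls64_model(s_maxbytes - 1) - du_bits;

	printf("dun_bits=%d dun_bytes=%d\n", dun_bits, (dun_bits + 7) / 8);
	/* dun_bits=32 dun_bytes=4 */
	return 0;
}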
/* fname.c */
bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
@@ -332,17 +374,17 @@ void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf);
/* inline_crypt.c */
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci);
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci);
static inline bool
-fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
+fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
{
return ci->ci_inlinecrypt;
}
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
- const struct fscrypt_info *ci);
+ const struct fscrypt_inode_info *ci);
void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key);
@@ -353,7 +395,7 @@ void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
*/
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
/*
* The two smp_load_acquire()'s here pair with the smp_store_release()'s
@@ -370,13 +412,13 @@ fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
-static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+static inline int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
{
return 0;
}
static inline bool
-fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
+fscrypt_using_inline_encryption(const struct fscrypt_inode_info *ci)
{
return false;
}
@@ -384,7 +426,7 @@ fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
static inline int
fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
WARN_ON_ONCE(1);
return -EOPNOTSUPP;
@@ -398,7 +440,7 @@ fscrypt_destroy_inline_crypt_key(struct super_block *sb,
static inline bool
fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
return smp_load_acquire(&prep_key->tfm) != NULL;
}
@@ -433,8 +475,28 @@ struct fscrypt_master_key_secret {
* fscrypt_master_key - an in-use master key
*
* This represents a master encryption key which has been added to the
- * filesystem and can be used to "unlock" the encrypted files which were
- * encrypted with it.
+ * filesystem. There are three high-level states that a key can be in:
+ *
+ * FSCRYPT_KEY_STATUS_PRESENT
+ * Key is fully usable; it can be used to unlock inodes that are encrypted
+ * with it (this includes being able to create new inodes). ->mk_present
+ * indicates whether the key is in this state. ->mk_secret exists, the key
+ * is in the keyring, and ->mk_active_refs > 0 due to ->mk_present.
+ *
+ * FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED
+ * Removal of this key has been initiated, but some inodes that were
+ * unlocked with it are still in-use. Like ABSENT, ->mk_secret is wiped,
+ * and the key can no longer be used to unlock inodes. Unlike ABSENT, the
+ * key is still in the keyring; ->mk_decrypted_inodes is nonempty; and
+ * ->mk_active_refs > 0, being equal to the size of ->mk_decrypted_inodes.
+ *
+ * This state transitions to ABSENT if ->mk_decrypted_inodes becomes empty,
+ * or to PRESENT if FS_IOC_ADD_ENCRYPTION_KEY is called again for this key.
+ *
+ * FSCRYPT_KEY_STATUS_ABSENT
+ * Key is fully removed. The key is no longer in the keyring,
+ * ->mk_decrypted_inodes is empty, ->mk_active_refs == 0, ->mk_secret is
+ * wiped, and the key can no longer be used to unlock inodes.
*/
struct fscrypt_master_key {
@@ -444,7 +506,7 @@ struct fscrypt_master_key {
*/
struct hlist_node mk_node;
- /* Semaphore that protects ->mk_secret and ->mk_users */
+ /* Semaphore that protects ->mk_secret, ->mk_users, and ->mk_present */
struct rw_semaphore mk_sem;
/*
@@ -454,8 +516,8 @@ struct fscrypt_master_key {
* ->mk_direct_keys) that have been prepared continue to exist.
* A structural ref only guarantees that the struct continues to exist.
*
- * There is one active ref associated with ->mk_secret being present,
- * and one active ref for each inode in ->mk_decrypted_inodes.
+ * There is one active ref associated with ->mk_present being true, and
+ * one active ref for each inode in ->mk_decrypted_inodes.
*
* There is one structural ref associated with the active refcount being
* nonzero. Finding a key in the keyring also takes a structural ref,
@@ -467,17 +529,10 @@ struct fscrypt_master_key {
struct rcu_head mk_rcu_head;
/*
- * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is
- * executed, this is wiped and no new inodes can be unlocked with this
- * key; however, there may still be inodes in ->mk_decrypted_inodes
- * which could not be evicted. As long as some inodes still remain,
- * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or
- * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again.
+ * The secret key material. Wiped as soon as it is no longer needed;
+ * for details, see the fscrypt_master_key struct comment.
*
- * While ->mk_secret is present, one ref in ->mk_active_refs is held.
- *
- * Locking: protected by ->mk_sem. The manipulation of ->mk_active_refs
- * associated with this field is protected by ->mk_sem as well.
+ * Locking: protected by ->mk_sem.
*/
struct fscrypt_master_key_secret mk_secret;
@@ -500,7 +555,7 @@ struct fscrypt_master_key {
*
* Locking: protected by ->mk_sem. (We don't just rely on the keyrings
* subsystem semaphore ->mk_users->sem, as we need support for atomic
- * search+insert along with proper synchronization with ->mk_secret.)
+ * search+insert along with proper synchronization with other fields.)
*/
struct key *mk_users;
@@ -523,20 +578,17 @@ struct fscrypt_master_key {
siphash_key_t mk_ino_hash_key;
bool mk_ino_hash_key_initialized;
-} __randomize_layout;
-
-static inline bool
-is_master_key_secret_present(const struct fscrypt_master_key_secret *secret)
-{
/*
- * The READ_ONCE() is only necessary for fscrypt_drop_inode().
- * fscrypt_drop_inode() runs in atomic context, so it can't take the key
- * semaphore and thus 'secret' can change concurrently which would be a
- * data race. But fscrypt_drop_inode() only need to know whether the
- * secret *was* present at the time of check, so READ_ONCE() suffices.
+ * Whether this key is in the "present" state, i.e. fully usable. For
+ * details, see the fscrypt_master_key struct comment.
+ *
+ * Locking: protected by ->mk_sem, but can be read locklessly using
+ * READ_ONCE(). Writers must use WRITE_ONCE() when concurrent readers
+ * are possible.
*/
- return READ_ONCE(secret->size) != 0;
-}
+ bool mk_present;
+
+} __randomize_layout;
static inline const char *master_key_spec_type(
const struct fscrypt_key_specifier *spec)
@@ -598,17 +650,18 @@ struct fscrypt_mode {
extern struct fscrypt_mode fscrypt_modes[];
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, const struct fscrypt_info *ci);
+ const u8 *raw_key, const struct fscrypt_inode_info *ci);
void fscrypt_destroy_prepared_key(struct super_block *sb,
struct fscrypt_prepared_key *prep_key);
-int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key);
+int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_key);
-int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+int fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk);
-void fscrypt_hash_inode_number(struct fscrypt_info *ci,
+void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk);
int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported);
@@ -643,10 +696,11 @@ static inline int fscrypt_require_key(struct inode *inode)
void fscrypt_put_direct_key(struct fscrypt_direct_key *dk);
-int fscrypt_setup_v1_file_key(struct fscrypt_info *ci,
+int fscrypt_setup_v1_file_key(struct fscrypt_inode_info *ci,
const u8 *raw_master_key);
-int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci);
+int fscrypt_setup_v1_file_key_via_subscribed_keyrings(
+ struct fscrypt_inode_info *ci);
/* policy.c */
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 6238dbcadcad..52504dd478d3 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(__fscrypt_prepare_setattr);
int fscrypt_prepare_setflags(struct inode *inode,
unsigned int oldflags, unsigned int flags)
{
- struct fscrypt_info *ci;
+ struct fscrypt_inode_info *ci;
struct fscrypt_master_key *mk;
int err;
@@ -187,7 +187,7 @@ int fscrypt_prepare_setflags(struct inode *inode,
return -EINVAL;
mk = ci->ci_master_key;
down_read(&mk->mk_sem);
- if (is_master_key_secret_present(&mk->mk_secret))
+ if (mk->mk_present)
err = fscrypt_derive_dirhash_key(ci, mk);
else
err = -ENOKEY;
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index 8bfb3ce86476..b4002aea7cdb 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -39,11 +39,11 @@ static struct block_device **fscrypt_get_devices(struct super_block *sb,
return devs;
}
-static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
+static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_inode_info *ci)
{
- struct super_block *sb = ci->ci_inode->i_sb;
+ const struct super_block *sb = ci->ci_inode->i_sb;
unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
- int ino_bits = 64, lblk_bits = 64;
+ int dun_bits;
if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return offsetofend(union fscrypt_iv, nonce);
@@ -54,10 +54,9 @@ static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
return sizeof(__le32);
- /* Default case: IVs are just the file logical block number */
- if (sb->s_cop->get_ino_and_lblk_bits)
- sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
- return DIV_ROUND_UP(lblk_bits, 8);
+ /* Default case: IVs are just the file data unit index */
+ dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
+ return DIV_ROUND_UP(dun_bits, 8);
}
/*
@@ -90,7 +89,7 @@ static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
}
/* Enable inline encryption for this file if supported. */
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
@@ -129,7 +128,7 @@ int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
* crypto configuration that the file would use.
*/
crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
- crypto_cfg.data_unit_size = sb->s_blocksize;
+ crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
devs = fscrypt_get_devices(sb, &num_devs);
@@ -152,7 +151,7 @@ out_free_devs:
int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
const u8 *raw_key,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
const struct inode *inode = ci->ci_inode;
struct super_block *sb = inode->i_sb;
@@ -168,7 +167,8 @@ int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
return -ENOMEM;
err = blk_crypto_init_key(blk_key, raw_key, crypto_mode,
- fscrypt_get_dun_bytes(ci), sb->s_blocksize);
+ fscrypt_get_dun_bytes(ci),
+ 1U << ci->ci_data_unit_bits);
if (err) {
fscrypt_err(inode, "error %d initializing blk-crypto key", err);
goto fail;
@@ -232,13 +232,15 @@ bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
-static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
+static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
+ u64 lblk_num,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
+ u64 index = lblk_num << ci->ci_data_units_per_block_bits;
union fscrypt_iv iv;
int i;
- fscrypt_generate_iv(&iv, lblk_num, ci);
+ fscrypt_generate_iv(&iv, index, ci);
BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
@@ -265,7 +267,7 @@ static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
if (!fscrypt_inode_uses_inline_crypto(inode))
@@ -456,7 +458,7 @@ EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
*/
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
u32 dun;
if (!fscrypt_inode_uses_inline_crypto(inode))
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 7cbb1fd872ac..f34a9b0b9e92 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -99,10 +99,10 @@ void fscrypt_put_master_key_activeref(struct super_block *sb,
spin_unlock(&sb->s_master_keys->lock);
/*
- * ->mk_active_refs == 0 implies that ->mk_secret is not present and
- * that ->mk_decrypted_inodes is empty.
+ * ->mk_active_refs == 0 implies that ->mk_present is false and
+ * ->mk_decrypted_inodes is empty.
*/
- WARN_ON_ONCE(is_master_key_secret_present(&mk->mk_secret));
+ WARN_ON_ONCE(mk->mk_present);
WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes));
for (i = 0; i <= FSCRYPT_MODE_MAX; i++) {
@@ -121,6 +121,18 @@ void fscrypt_put_master_key_activeref(struct super_block *sb,
fscrypt_put_master_key(mk);
}
+/*
+ * This transitions the key state from present to incompletely removed, and then
+ * potentially to absent (depending on whether inodes remain).
+ */
+static void fscrypt_initiate_key_removal(struct super_block *sb,
+ struct fscrypt_master_key *mk)
+{
+ WRITE_ONCE(mk->mk_present, false);
+ wipe_master_key_secret(&mk->mk_secret);
+ fscrypt_put_master_key_activeref(sb, mk);
+}
+
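A hedged sketch of the three-state key lifecycle described in the fscrypt_master_key comment: PRESENT moves to INCOMPLETELY_REMOVED when removal is initiated while unlocked inodes remain, and then to ABSENT once the last such inode is gone (or back to PRESENT if the key is re-added). Illustrative only; the real transitions are driven by refcounts under mk_sem.

#include <stdio.h>

enum key_state { ABSENT, INCOMPLETELY_REMOVED, PRESENT };

static enum key_state initiate_removal(enum key_state s, int inodes_in_use)
{
	if (s != PRESENT)
		return s;
	return inodes_in_use ? INCOMPLETELY_REMOVED : ABSENT;
}

static enum key_state last_inode_evicted(enum key_state s)
{
	return s == INCOMPLETELY_REMOVED ? ABSENT : s;
}

int main(void)
{
	enum key_state s = initiate_removal(PRESENT, 2);

	printf("after removal: %d\n", s);	/* 1: INCOMPLETELY_REMOVED */
	s = last_inode_evicted(s);
	printf("after eviction: %d\n", s);	/* 0: ABSENT */
	return 0;
}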
static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec)
{
if (spec->__reserved)
@@ -234,14 +246,13 @@ void fscrypt_destroy_keyring(struct super_block *sb)
* evicted, every key remaining in the keyring should
* have an empty inode list, and should only still be in
* the keyring due to the single active ref associated
- * with ->mk_secret. There should be no structural refs
- * beyond the one associated with the active ref.
+ * with ->mk_present. There should be no structural
+ * refs beyond the one associated with the active ref.
*/
WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1);
WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1);
- WARN_ON_ONCE(!is_master_key_secret_present(&mk->mk_secret));
- wipe_master_key_secret(&mk->mk_secret);
- fscrypt_put_master_key_activeref(sb, mk);
+ WARN_ON_ONCE(!mk->mk_present);
+ fscrypt_initiate_key_removal(sb, mk);
}
}
kfree_sensitive(keyring);
@@ -439,7 +450,8 @@ static int add_new_master_key(struct super_block *sb,
}
move_master_key_secret(&mk->mk_secret, secret);
- refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */
+ mk->mk_present = true;
+ refcount_set(&mk->mk_active_refs, 1); /* ->mk_present is true */
spin_lock(&keyring->lock);
hlist_add_head_rcu(&mk->mk_node,
@@ -478,11 +490,18 @@ static int add_existing_master_key(struct fscrypt_master_key *mk,
return err;
}
- /* Re-add the secret if needed. */
- if (!is_master_key_secret_present(&mk->mk_secret)) {
- if (!refcount_inc_not_zero(&mk->mk_active_refs))
+ /* If the key is incompletely removed, make it present again. */
+ if (!mk->mk_present) {
+ if (!refcount_inc_not_zero(&mk->mk_active_refs)) {
+ /*
+ * Raced with the last active ref being dropped, so the
+ * key has become, or is about to become, "absent".
+ * Therefore, we need to allocate a new key struct.
+ */
return KEY_DEAD;
+ }
move_master_key_secret(&mk->mk_secret, secret);
+ WRITE_ONCE(mk->mk_present, true);
}
return 0;
@@ -506,8 +525,8 @@ static int do_add_master_key(struct super_block *sb,
err = add_new_master_key(sb, secret, mk_spec);
} else {
/*
- * Found the key in ->s_master_keys. Re-add the secret if
- * needed, and add the user to ->mk_users if needed.
+ * Found the key in ->s_master_keys. Add the user to ->mk_users
+ * if needed, and make the key "present" again if possible.
*/
down_write(&mk->mk_sem);
err = add_existing_master_key(mk, secret);
@@ -867,7 +886,7 @@ static void shrink_dcache_inode(struct inode *inode)
static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk)
{
- struct fscrypt_info *ci;
+ struct fscrypt_inode_info *ci;
struct inode *inode;
struct inode *toput_inode = NULL;
@@ -917,7 +936,7 @@ static int check_for_busy_inodes(struct super_block *sb,
/* select an example file to show for debugging purposes */
struct inode *inode =
list_first_entry(&mk->mk_decrypted_inodes,
- struct fscrypt_info,
+ struct fscrypt_inode_info,
ci_master_key_link)->ci_inode;
ino = inode->i_ino;
}
@@ -989,9 +1008,8 @@ static int try_to_lock_encrypted_files(struct super_block *sb,
*
* If all inodes were evicted, then we unlink the fscrypt_master_key from the
* keyring. Otherwise it remains in the keyring in the "incompletely removed"
- * state (without the actual secret key) where it tracks the list of remaining
- * inodes. Userspace can execute the ioctl again later to retry eviction, or
- * alternatively can re-add the secret key again.
+ * state where it tracks the list of remaining inodes. Userspace can execute
+ * the ioctl again later to retry eviction, or alternatively can re-add the key.
*
* For more details, see the "Removing keys" section of
* Documentation/filesystems/fscrypt.rst.
@@ -1053,11 +1071,10 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
}
}
- /* No user claims remaining. Go ahead and wipe the secret. */
+ /* No user claims remaining. Initiate removal of the key. */
err = -ENOKEY;
- if (is_master_key_secret_present(&mk->mk_secret)) {
- wipe_master_key_secret(&mk->mk_secret);
- fscrypt_put_master_key_activeref(sb, mk);
+ if (mk->mk_present) {
+ fscrypt_initiate_key_removal(sb, mk);
err = 0;
}
inodes_remain = refcount_read(&mk->mk_active_refs) > 0;
@@ -1074,9 +1091,9 @@ static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users)
}
/*
* We return 0 if we successfully did something: removed a claim to the
- * key, wiped the secret, or tried locking the files again. Users need
- * to check the informational status flags if they care whether the key
- * has been fully removed including all files locked.
+ * key, initiated removal of the key, or tried locking the files again.
+ * Users need to check the informational status flags if they care
+ * whether the key has been fully removed including all files locked.
*/
out_put_key:
fscrypt_put_master_key(mk);
@@ -1103,12 +1120,11 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users);
* Retrieve the status of an fscrypt master encryption key.
*
* We set ->status to indicate whether the key is absent, present, or
- * incompletely removed. "Incompletely removed" means that the master key
- * secret has been removed, but some files which had been unlocked with it are
- * still in use. This field allows applications to easily determine the state
- * of an encrypted directory without using a hack such as trying to open a
- * regular file in it (which can confuse the "incompletely removed" state with
- * absent or present).
+ * incompletely removed. (For an explanation of what these statuses mean and
+ * how they are represented internally, see struct fscrypt_master_key.) This
+ * field allows applications to easily determine the status of an encrypted
+ * directory without using a hack such as trying to open a regular file in it
+ * (which can confuse the "incompletely removed" status with absent or present).
*
* In addition, for v2 policy keys we allow applications to determine, via
* ->status_flags and ->user_count, whether the key has been added by the
@@ -1150,7 +1166,7 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg)
}
down_read(&mk->mk_sem);
- if (!is_master_key_secret_present(&mk->mk_secret)) {
+ if (!mk->mk_present) {
arg.status = refcount_read(&mk->mk_active_refs) > 0 ?
FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED :
FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */;
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index 361f41ef46c7..d71f7c799e79 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -148,7 +148,7 @@ err_free_tfm:
* and IV generation method (@ci->ci_policy.flags).
*/
int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, const struct fscrypt_info *ci)
+ const u8 *raw_key, const struct fscrypt_inode_info *ci)
{
struct crypto_skcipher *tfm;
@@ -178,13 +178,14 @@ void fscrypt_destroy_prepared_key(struct super_block *sb,
}
/* Given a per-file encryption key, set up the file's crypto transform object */
-int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
+int fscrypt_set_per_file_enc_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_key)
{
ci->ci_owns_key = true;
return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci);
}
-static int setup_per_mode_enc_key(struct fscrypt_info *ci,
+static int setup_per_mode_enc_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk,
struct fscrypt_prepared_key *keys,
u8 hkdf_context, bool include_fs_uuid)
@@ -265,7 +266,7 @@ static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk,
return 0;
}
-int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
+int fscrypt_derive_dirhash_key(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk)
{
int err;
@@ -279,7 +280,7 @@ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci,
return 0;
}
-void fscrypt_hash_inode_number(struct fscrypt_info *ci,
+void fscrypt_hash_inode_number(struct fscrypt_inode_info *ci,
const struct fscrypt_master_key *mk)
{
WARN_ON_ONCE(ci->ci_inode->i_ino == 0);
@@ -289,7 +290,7 @@ void fscrypt_hash_inode_number(struct fscrypt_info *ci,
&mk->mk_ino_hash_key);
}
-static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci,
+static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk)
{
int err;
@@ -329,7 +330,7 @@ unlock:
return 0;
}
-static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
+static int fscrypt_setup_v2_file_key(struct fscrypt_inode_info *ci,
struct fscrypt_master_key *mk,
bool need_dirhash_key)
{
@@ -404,7 +405,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci,
* still allow 512-bit master keys if the user chooses to use them, though.)
*/
static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
- const struct fscrypt_info *ci)
+ const struct fscrypt_inode_info *ci)
{
unsigned int min_keysize;
@@ -430,11 +431,12 @@ static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk,
*
* If the master key is found in the filesystem-level keyring, then it is
* returned in *mk_ret with its semaphore read-locked. This is needed to ensure
- * that only one task links the fscrypt_info into ->mk_decrypted_inodes (as
- * multiple tasks may race to create an fscrypt_info for the same inode), and to
- * synchronize the master key being removed with a new inode starting to use it.
+ * that only one task links the fscrypt_inode_info into ->mk_decrypted_inodes
+ * (as multiple tasks may race to create an fscrypt_inode_info for the same
+ * inode), and to synchronize the master key being removed with a new inode
+ * starting to use it.
*/
-static int setup_file_encryption_key(struct fscrypt_info *ci,
+static int setup_file_encryption_key(struct fscrypt_inode_info *ci,
bool need_dirhash_key,
struct fscrypt_master_key **mk_ret)
{
@@ -484,8 +486,8 @@ static int setup_file_encryption_key(struct fscrypt_info *ci,
}
down_read(&mk->mk_sem);
- /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */
- if (!is_master_key_secret_present(&mk->mk_secret)) {
+ if (!mk->mk_present) {
+ /* FS_IOC_REMOVE_ENCRYPTION_KEY has been executed on this key */
err = -ENOKEY;
goto out_release_key;
}
@@ -519,7 +521,7 @@ out_release_key:
return err;
}
-static void put_crypt_info(struct fscrypt_info *ci)
+static void put_crypt_info(struct fscrypt_inode_info *ci)
{
struct fscrypt_master_key *mk;
@@ -537,8 +539,8 @@ static void put_crypt_info(struct fscrypt_info *ci)
/*
* Remove this inode from the list of inodes that were unlocked
* with the master key. In addition, if we're removing the last
- * inode from a master key struct that already had its secret
- * removed, then complete the full removal of the struct.
+ * inode from an incompletely removed key, then complete the
+ * full removal of the key.
*/
spin_lock(&mk->mk_decrypted_inodes_lock);
list_del(&ci->ci_master_key_link);
@@ -546,7 +548,7 @@ static void put_crypt_info(struct fscrypt_info *ci)
fscrypt_put_master_key_activeref(ci->ci_inode->i_sb, mk);
}
memzero_explicit(ci, sizeof(*ci));
- kmem_cache_free(fscrypt_info_cachep, ci);
+ kmem_cache_free(fscrypt_inode_info_cachep, ci);
}
static int
@@ -555,7 +557,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
bool need_dirhash_key)
{
- struct fscrypt_info *crypt_info;
+ struct fscrypt_inode_info *crypt_info;
struct fscrypt_mode *mode;
struct fscrypt_master_key *mk = NULL;
int res;
@@ -564,7 +566,7 @@ fscrypt_setup_encryption_info(struct inode *inode,
if (res)
return res;
- crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_KERNEL);
+ crypt_info = kmem_cache_zalloc(fscrypt_inode_info_cachep, GFP_KERNEL);
if (!crypt_info)
return -ENOMEM;
@@ -580,6 +582,11 @@ fscrypt_setup_encryption_info(struct inode *inode,
WARN_ON_ONCE(mode->ivsize > FSCRYPT_MAX_IV_SIZE);
crypt_info->ci_mode = mode;
+ crypt_info->ci_data_unit_bits =
+ fscrypt_policy_du_bits(&crypt_info->ci_policy, inode);
+ crypt_info->ci_data_units_per_block_bits =
+ inode->i_blkbits - crypt_info->ci_data_unit_bits;
+
res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
if (res)
goto out;
@@ -587,8 +594,8 @@ fscrypt_setup_encryption_info(struct inode *inode,
/*
* For existing inodes, multiple tasks may race to set ->i_crypt_info.
* So use cmpxchg_release(). This pairs with the smp_load_acquire() in
- * fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a
- * RELEASE barrier so that other tasks can ACQUIRE it.
+ * fscrypt_get_inode_info(). I.e., here we publish ->i_crypt_info with
+ * a RELEASE barrier so that other tasks can ACQUIRE it.
*/
if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) {
/*
@@ -735,8 +742,8 @@ EXPORT_SYMBOL_GPL(fscrypt_prepare_new_inode);
* fscrypt_put_encryption_info() - free most of an inode's fscrypt data
* @inode: an inode being evicted
*
- * Free the inode's fscrypt_info. Filesystems must call this when the inode is
- * being evicted. An RCU grace period need not have elapsed yet.
+ * Free the inode's fscrypt_inode_info. Filesystems must call this when the
+ * inode is being evicted. An RCU grace period need not have elapsed yet.
*/
void fscrypt_put_encryption_info(struct inode *inode)
{
@@ -773,7 +780,7 @@ EXPORT_SYMBOL(fscrypt_free_inode);
*/
int fscrypt_drop_inode(struct inode *inode)
{
- const struct fscrypt_info *ci = fscrypt_get_info(inode);
+ const struct fscrypt_inode_info *ci = fscrypt_get_inode_info(inode);
/*
* If ci is NULL, then the inode doesn't have an encryption key set up
@@ -794,13 +801,14 @@ int fscrypt_drop_inode(struct inode *inode)
return 0;
/*
- * Note: since we aren't holding the key semaphore, the result here can
+ * We can't take ->mk_sem here, since this runs in atomic context.
+ * Therefore, ->mk_present can change concurrently, and our result may
* immediately become outdated. But there's no correctness problem with
* unnecessarily evicting. Nor is there a correctness problem with not
* evicting while iput() is racing with the key being removed, since
* then the thread removing the key will either evict the inode itself
* or will correctly detect that it wasn't evicted due to the race.
*/
- return !is_master_key_secret_present(&ci->ci_master_key->mk_secret);
+ return !READ_ONCE(ci->ci_master_key->mk_present);
}
EXPORT_SYMBOL_GPL(fscrypt_drop_inode);
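For context, a sketch (not part of the patch) of the acquire side that the cmpxchg_release() comment above pairs with; fscrypt_get_inode_info() is assumed to be the renamed inline helper in fscrypt.h:

    static inline struct fscrypt_inode_info *
    fscrypt_get_inode_info(const struct inode *inode)
    {
    	/* Pairs with cmpxchg_release() in fscrypt_setup_encryption_info() */
    	return smp_load_acquire(&inode->i_crypt_info);
    }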
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 75dabd9b27f9..a10710bc8123 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -178,7 +178,8 @@ void fscrypt_put_direct_key(struct fscrypt_direct_key *dk)
*/
static struct fscrypt_direct_key *
find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
- const u8 *raw_key, const struct fscrypt_info *ci)
+ const u8 *raw_key,
+ const struct fscrypt_inode_info *ci)
{
unsigned long hash_key;
struct fscrypt_direct_key *dk;
@@ -218,7 +219,7 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert,
/* Prepare to encrypt directly using the master key in the given mode */
static struct fscrypt_direct_key *
-fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key)
+fscrypt_get_direct_key(const struct fscrypt_inode_info *ci, const u8 *raw_key)
{
struct fscrypt_direct_key *dk;
int err;
@@ -250,7 +251,7 @@ err_free_dk:
}
/* v1 policy, DIRECT_KEY: use the master key directly */
-static int setup_v1_file_key_direct(struct fscrypt_info *ci,
+static int setup_v1_file_key_direct(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
struct fscrypt_direct_key *dk;
@@ -264,7 +265,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci,
}
/* v1 policy, !DIRECT_KEY: derive the file's encryption key */
-static int setup_v1_file_key_derived(struct fscrypt_info *ci,
+static int setup_v1_file_key_derived(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
u8 *derived_key;
@@ -289,7 +290,8 @@ out:
return err;
}
-int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
+int fscrypt_setup_v1_file_key(struct fscrypt_inode_info *ci,
+ const u8 *raw_master_key)
{
if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
return setup_v1_file_key_direct(ci, raw_master_key);
@@ -297,8 +299,10 @@ int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key)
return setup_v1_file_key_derived(ci, raw_master_key);
}
-int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
+int
+fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_inode_info *ci)
{
+ const struct super_block *sb = ci->ci_inode->i_sb;
struct key *key;
const struct fscrypt_key *payload;
int err;
@@ -306,8 +310,8 @@ int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci)
key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX,
ci->ci_policy.v1.master_key_descriptor,
ci->ci_mode->keysize, &payload);
- if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) {
- key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix,
+ if (key == ERR_PTR(-ENOKEY) && sb->s_cop->legacy_key_prefix) {
+ key = find_and_lock_process_key(sb->s_cop->legacy_key_prefix,
ci->ci_policy.v1.master_key_descriptor,
ci->ci_mode->keysize, &payload);
}
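For reference, the keys this lookup searches are added from userspace via the process keyrings; a hedged sketch using libkeyutils (the descriptor and key size are illustrative):

    #include <keyutils.h>
    #include <linux/fscrypt.h>

    struct fscrypt_key key = { .size = 64 };  /* .raw[] holds the master key */
    key_serial_t kid = add_key("logon", "fscrypt:0123456789abcdef",
                               &key, sizeof(key), KEY_SPEC_SESSION_KEYRING);
    /* "ext4:0123456789abcdef" also resolves via legacy_key_prefix above */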
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index f4456ecb3f87..701259991277 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -118,12 +118,11 @@ static bool supported_direct_key_modes(const struct inode *inode,
}
static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
- const struct inode *inode,
- const char *type,
- int max_ino_bits, int max_lblk_bits)
+ const struct inode *inode)
{
+ const char *type = (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
+ ? "IV_INO_LBLK_64" : "IV_INO_LBLK_32";
struct super_block *sb = inode->i_sb;
- int ino_bits = 64, lblk_bits = 64;
/*
* IV_INO_LBLK_* exist only because of hardware limitations, and
@@ -150,17 +149,29 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy,
type, sb->s_id);
return false;
}
- if (sb->s_cop->get_ino_and_lblk_bits)
- sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits);
- if (ino_bits > max_ino_bits) {
+
+ /*
+ * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that inode numbers fit
+ * in 32 bits. In principle, IV_INO_LBLK_32 could support longer inode
+ * numbers because it hashes the inode number; however, currently the
+ * inode number is taken from inode::i_ino, which is 'unsigned long'.
+ * So for now the implementation limit is 32 bits.
+ */
+ if (!sb->s_cop->has_32bit_inodes) {
fscrypt_warn(inode,
"Can't use %s policy on filesystem '%s' because its inode numbers are too long",
type, sb->s_id);
return false;
}
- if (lblk_bits > max_lblk_bits) {
+
+ /*
+ * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that file data unit
+ * indices fit in 32 bits.
+ */
+ if (fscrypt_max_file_dun_bits(sb,
+ fscrypt_policy_v2_du_bits(policy, inode)) > 32) {
fscrypt_warn(inode,
- "Can't use %s policy on filesystem '%s' because its block numbers are too long",
+ "Can't use %s policy on filesystem '%s' because its maximum file size is too large",
type, sb->s_id);
return false;
}
@@ -233,25 +244,39 @@ static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy,
return false;
}
+ if (policy->log2_data_unit_size) {
+ if (!inode->i_sb->s_cop->supports_subblock_data_units) {
+ fscrypt_warn(inode,
+ "Filesystem does not support configuring crypto data unit size");
+ return false;
+ }
+ if (policy->log2_data_unit_size > inode->i_blkbits ||
+ policy->log2_data_unit_size < SECTOR_SHIFT /* 9 */) {
+ fscrypt_warn(inode,
+ "Unsupported log2_data_unit_size in encryption policy: %d",
+ policy->log2_data_unit_size);
+ return false;
+ }
+ if (policy->log2_data_unit_size != inode->i_blkbits &&
+ (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) {
+ /*
+ * Not safe to enable yet, as we need to ensure that DUN
+ * wraparound can only occur on a FS block boundary.
+ */
+ fscrypt_warn(inode,
+ "Sub-block data units not yet supported with IV_INO_LBLK_32");
+ return false;
+ }
+ }
+
if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) &&
!supported_direct_key_modes(inode, policy->contents_encryption_mode,
policy->filenames_encryption_mode))
return false;
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) &&
- !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64",
- 32, 32))
- return false;
-
- /*
- * IV_INO_LBLK_32 hashes the inode number, so in principle it can
- * support any ino_bits. However, currently the inode number is gotten
- * from inode::i_ino which is 'unsigned long'. So for now the
- * implementation limit is 32 bits.
- */
- if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
- !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
- 32, 32))
+ if ((policy->flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+ FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) &&
+ !supported_iv_ino_lblk_policy(policy, inode))
return false;
if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
@@ -330,6 +355,7 @@ static int fscrypt_new_context(union fscrypt_context *ctx_u,
ctx->filenames_encryption_mode =
policy->filenames_encryption_mode;
ctx->flags = policy->flags;
+ ctx->log2_data_unit_size = policy->log2_data_unit_size;
memcpy(ctx->master_key_identifier,
policy->master_key_identifier,
sizeof(ctx->master_key_identifier));
@@ -390,6 +416,7 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
policy->filenames_encryption_mode =
ctx->filenames_encryption_mode;
policy->flags = ctx->flags;
+ policy->log2_data_unit_size = ctx->log2_data_unit_size;
memcpy(policy->__reserved, ctx->__reserved,
sizeof(policy->__reserved));
memcpy(policy->master_key_identifier,
@@ -405,11 +432,11 @@ int fscrypt_policy_from_context(union fscrypt_policy *policy_u,
/* Retrieve an inode's encryption policy */
static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy)
{
- const struct fscrypt_info *ci;
+ const struct fscrypt_inode_info *ci;
union fscrypt_context ctx;
int ret;
- ci = fscrypt_get_info(inode);
+ ci = fscrypt_get_inode_info(inode);
if (ci) {
/* key available, use the cached policy */
*policy = ci->ci_policy;
@@ -647,7 +674,7 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
/*
* Both parent and child are encrypted, so verify they use the same
- * encryption policy. Compare the fscrypt_info structs if the keys are
+ * encryption policy. Compare the cached policies if the keys are
* available, otherwise retrieve and compare the fscrypt_contexts.
*
* Note that the fscrypt_context retrieval will be required frequently
@@ -717,7 +744,7 @@ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir)
*/
int fscrypt_context_for_new_inode(void *ctx, struct inode *inode)
{
- struct fscrypt_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci = inode->i_crypt_info;
BUILD_BUG_ON(sizeof(union fscrypt_context) !=
FSCRYPT_SET_CONTEXT_MAX_SIZE);
@@ -742,7 +769,7 @@ EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode);
*/
int fscrypt_set_context(struct inode *inode, void *fs_data)
{
- struct fscrypt_info *ci = inode->i_crypt_info;
+ struct fscrypt_inode_info *ci = inode->i_crypt_info;
union fscrypt_context ctx;
int ctxsize;
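Making the data-unit arithmetic concrete (sketch; field names from the keysetup.c hunk earlier, values for a hypothetical filesystem with 4K blocks):

    /* log2_data_unit_size is validated to lie in [SECTOR_SHIFT, i_blkbits] */
    ci->ci_data_unit_bits = 9;                  /* 512-byte data units */
    ci->ci_data_units_per_block_bits = 12 - 9;  /* 4K block => 8 DUs per block */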
diff --git a/fs/dcache.c b/fs/dcache.c
index 25ac74d30bff..796e23761ba0 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3246,11 +3246,10 @@ void d_genocide(struct dentry *parent)
d_walk(parent, parent, d_genocide_kill);
}
-void d_tmpfile(struct file *file, struct inode *inode)
+void d_mark_tmpfile(struct file *file, struct inode *inode)
{
struct dentry *dentry = file->f_path.dentry;
- inode_dec_link_count(inode);
BUG_ON(dentry->d_name.name != dentry->d_iname ||
!hlist_unhashed(&dentry->d_u.d_alias) ||
!d_unlinked(dentry));
@@ -3260,6 +3259,15 @@ void d_tmpfile(struct file *file, struct inode *inode)
(unsigned long long)inode->i_ino);
spin_unlock(&dentry->d_lock);
spin_unlock(&dentry->d_parent->d_lock);
+}
+EXPORT_SYMBOL(d_mark_tmpfile);
+
+void d_tmpfile(struct file *file, struct inode *inode)
+{
+ struct dentry *dentry = file->f_path.dentry;
+
+ inode_dec_link_count(inode);
+ d_mark_tmpfile(file, inode);
d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
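The split lets a filesystem that manages i_nlink inside its own transaction machinery reuse only the dentry bookkeeping; a hypothetical caller might look like:

    /* Sketch: the fs already dropped the link count itself */
    d_mark_tmpfile(file, inode);
    d_instantiate(file->f_path.dentry, inode);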
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 83e57e9f9fa0..5d41765e0c77 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -72,7 +72,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 299c295a27a0..c830261aa883 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -338,7 +338,7 @@ static int mknod_ptmx(struct super_block *sb)
}
inode->i_ino = 2;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
mode = S_IFCHR|opts->ptmxmode;
init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2));
@@ -451,7 +451,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
if (!inode)
goto fail;
inode->i_ino = 1;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -560,7 +560,7 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
inode->i_ino = index + 3;
inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
sprintf(s, "%d", index);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index f2ed0c0266cb..c586c5db18b5 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -702,6 +702,6 @@ int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
loff_t offset);
-extern const struct xattr_handler *ecryptfs_xattr_handlers[];
+extern const struct xattr_handler * const ecryptfs_xattr_handlers[];
#endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 992d9c7e64ae..a25dd3d20008 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1210,7 +1210,7 @@ static const struct xattr_handler ecryptfs_xattr_handler = {
.set = ecryptfs_xattr_set,
};
-const struct xattr_handler *ecryptfs_xattr_handlers[] = {
+const struct xattr_handler * const ecryptfs_xattr_handlers[] = {
&ecryptfs_xattr_handler,
NULL
};
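The second const makes the array elements immutable as well, so the whole handler table can be placed in .rodata. General shape (names illustrative):

    static const struct xattr_handler example_handler = { /* .get, .set, ... */ };

    const struct xattr_handler * const example_xattr_handlers[] = {
    	&example_handler,
    	NULL,
    };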
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index 59b52718a3a2..7e9961639802 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -51,7 +51,7 @@ static ssize_t efivarfs_file_write(struct file *file,
} else {
inode_lock(inode);
i_size_write(inode, datasize + sizeof(attributes));
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
inode_unlock(inode);
}
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index db9231f0e77b..76dd3c7295d9 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -25,7 +25,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
switch (mode & S_IFMT) {
case S_IFREG:
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 3789d22ba501..7844ab24b813 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -103,10 +103,9 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
i_uid_write(inode, (uid_t)be16_to_cpu(efs_inode->di_uid));
i_gid_write(inode, (gid_t)be16_to_cpu(efs_inode->di_gid));
inode->i_size = be32_to_cpu(efs_inode->di_size);
- inode->i_atime.tv_sec = be32_to_cpu(efs_inode->di_atime);
- inode->i_mtime.tv_sec = be32_to_cpu(efs_inode->di_mtime);
+ inode_set_atime(inode, be32_to_cpu(efs_inode->di_atime), 0);
+ inode_set_mtime(inode, be32_to_cpu(efs_inode->di_mtime), 0);
inode_set_ctime(inode, be32_to_cpu(efs_inode->di_ctime), 0);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
/* this is the number of blocks in the file */
if (inode->i_size == 0) {
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index 0c2c99c58b5e..f6a0a1748521 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -222,7 +222,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
up_read(&devs->rwsem);
return 0;
}
- map->m_bdev = dif->bdev;
+ map->m_bdev = dif->bdev_handle->bdev;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
@@ -240,7 +240,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
map->m_pa -= startoff;
- map->m_bdev = dif->bdev;
+ map->m_bdev = dif->bdev_handle->bdev;
map->m_daxdev = dif->dax_dev;
map->m_dax_part_off = dif->dax_part_off;
map->m_fscache = dif->fscache;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index edc8ec7581b8..b8ad05b4509d 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -175,7 +175,8 @@ static void *erofs_read_inode(struct erofs_buf *buf,
vi->chunkbits = sb->s_blocksize_bits +
(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
}
- inode->i_mtime = inode->i_atime = inode_get_ctime(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_get_ctime(inode)));
inode->i_flags &= ~S_DAX;
if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 4ff88d0dd980..cf04e21bfda2 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -47,7 +47,7 @@ typedef u32 erofs_blk_t;
struct erofs_device_info {
char *path;
struct erofs_fscache *fscache;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct dax_device *dax_dev;
u64 dax_part_off;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 3700af9ee173..6fd04781fec5 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -227,7 +227,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_fscache *fscache;
struct erofs_deviceslot *dis;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
void *ptr;
ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
@@ -251,13 +251,13 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
return PTR_ERR(fscache);
dif->fscache = fscache;
} else if (!sbi->devs->flatdev) {
- bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
- NULL);
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- dif->bdev = bdev;
- dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
- NULL, NULL);
+ bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ,
+ sb->s_type, NULL);
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
+ dif->bdev_handle = bdev_handle;
+ dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev,
+ &dif->dax_part_off, NULL, NULL);
}
dif->blocks = le32_to_cpu(dis->blocks);
@@ -806,8 +806,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data)
struct erofs_device_info *dif = ptr;
fs_put_dax(dif->dax_dev, NULL);
- if (dif->bdev)
- blkdev_put(dif->bdev, &erofs_fs_type);
+ if (dif->bdev_handle)
+ bdev_release(dif->bdev_handle);
erofs_fscache_unregister_cookie(dif->fscache);
dif->fscache = NULL;
kfree(dif->path);
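The open/close pairing after this conversion is bdev_open_by_path()/bdev_release(), with the raw block_device reached through handle->bdev. Minimal sketch (do_io() is hypothetical):

    struct bdev_handle *h = bdev_open_by_path(path, BLK_OPEN_READ, holder, NULL);

    if (IS_ERR(h))
    	return PTR_ERR(h);
    do_io(h->bdev);		/* use the underlying block_device */
    bdev_release(h);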
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 09d341675e89..b58316b49a43 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -168,7 +168,7 @@ const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
};
#endif
-const struct xattr_handler *erofs_xattr_handlers[] = {
+const struct xattr_handler * const erofs_xattr_handlers[] = {
&erofs_xattr_user_handler,
&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index f16283cb8c93..b246cd0e135e 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -23,7 +23,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx,
{
const struct xattr_handler *handler = NULL;
- static const struct xattr_handler *xattr_handler_map[] = {
+ static const struct xattr_handler * const xattr_handler_map[] = {
[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
@@ -44,7 +44,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx,
return xattr_prefix(handler);
}
-extern const struct xattr_handler *erofs_xattr_handlers[];
+extern const struct xattr_handler * const erofs_xattr_handlers[];
int erofs_xattr_prefixes_init(struct super_block *sb);
void erofs_xattr_prefixes_cleanup(struct super_block *sb);
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index f55498e5c23d..f78b614f44dc 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -549,6 +549,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 tz, __le16 time, __le16 date, u8 time_cs);
void exfat_truncate_atime(struct timespec64 *ts);
+void exfat_truncate_inode_atime(struct inode *inode);
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
u8 *tz, __le16 *time, __le16 *date, u8 *time_cs);
u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type);
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 32395ef686a2..30ee2c8d36a5 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -22,7 +22,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
if (err)
return err;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
if (!IS_SYNC(inode))
@@ -290,10 +290,10 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
if (attr->ia_valid & ATTR_SIZE)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
setattr_copy(&nop_mnt_idmap, inode, attr);
- exfat_truncate_atime(&inode->i_atime);
+ exfat_truncate_inode_atime(inode);
if (attr->ia_valid & ATTR_SIZE) {
error = exfat_block_truncate_page(inode, attr->ia_size);
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 13329baeafbc..a2185e6f0548 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -26,6 +26,7 @@ int __exfat_write_inode(struct inode *inode, int sync)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
bool is_dir = (ei->type == TYPE_DIR);
+ struct timespec64 ts;
if (inode->i_ino == EXFAT_ROOT_INO)
return 0;
@@ -55,16 +56,18 @@ int __exfat_write_inode(struct inode *inode, int sync)
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
&ep->dentry.file.create_time_cs);
- exfat_set_entry_time(sbi, &inode->i_mtime,
- &ep->dentry.file.modify_tz,
- &ep->dentry.file.modify_time,
- &ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_cs);
- exfat_set_entry_time(sbi, &inode->i_atime,
- &ep->dentry.file.access_tz,
- &ep->dentry.file.access_time,
- &ep->dentry.file.access_date,
- NULL);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.modify_tz,
+ &ep->dentry.file.modify_time,
+ &ep->dentry.file.modify_date,
+ &ep->dentry.file.modify_time_cs);
+ inode_set_mtime_to_ts(inode, ts);
+ exfat_set_entry_time(sbi, &ts,
+ &ep->dentry.file.access_tz,
+ &ep->dentry.file.access_time,
+ &ep->dentry.file.access_date,
+ NULL);
+ inode_set_atime_to_ts(inode, ts);
/* File size should be zero if there is no cluster allocated */
on_disk_size = i_size_read(inode);
@@ -355,7 +358,7 @@ static void exfat_write_failed(struct address_space *mapping, loff_t to)
if (to > i_size_read(inode)) {
truncate_pagecache(inode, i_size_read(inode));
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
exfat_truncate(inode);
}
}
@@ -398,7 +401,7 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
exfat_write_failed(mapping, pos+len);
if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ei->attr |= ATTR_ARCHIVE;
mark_inode_dirty(inode);
}
@@ -576,10 +579,10 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
exfat_save_attr(inode, info->attr);
inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9;
- inode->i_mtime = info->mtime;
+ inode_set_mtime_to_ts(inode, info->mtime);
inode_set_ctime_to_ts(inode, info->mtime);
ei->i_crtime = info->crtime;
- inode->i_atime = info->atime;
+ inode_set_atime_to_ts(inode, info->atime);
return 0;
}
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index 2e1a1a6b1021..fa8459828046 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -126,6 +126,14 @@ void exfat_truncate_atime(struct timespec64 *ts)
ts->tv_nsec = 0;
}
+void exfat_truncate_inode_atime(struct inode *inode)
+{
+ struct timespec64 atime = inode_get_atime(inode);
+
+ exfat_truncate_atime(&atime);
+ inode_set_atime_to_ts(inode, atime);
+}
+
u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type)
{
int i;
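exfat_truncate_inode_atime() above is an instance of the copy-modify-store pattern this series requires now that i_atime can no longer be addressed directly:

    struct timespec64 atime = inode_get_atime(inode);

    atime.tv_nsec = 0;	/* drop the sub-second part, as exfat does */
    inode_set_atime_to_ts(inode, atime);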
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 1b9f587f6cca..b92e46916dea 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -569,7 +569,7 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -582,8 +582,9 @@ static int exfat_create(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = EXFAT_I(inode)->i_crtime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ EXFAT_I(inode)->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
+
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -816,16 +817,16 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
ei->dir.dir = DIR_DELETED;
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
- exfat_truncate_atime(&dir->i_atime);
+ simple_inode_init_ts(dir);
+ exfat_truncate_inode_atime(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
mark_inode_dirty(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
unlock:
@@ -851,7 +852,7 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -865,8 +866,8 @@ static int exfat_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto unlock;
inode_inc_iversion(inode);
- inode->i_mtime = inode->i_atime = EXFAT_I(inode)->i_crtime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ EXFAT_I(inode)->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
d_instantiate(dentry, inode);
@@ -977,8 +978,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
ei->dir.dir = DIR_DELETED;
inode_inc_iversion(dir);
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
- exfat_truncate_atime(&dir->i_atime);
+ simple_inode_init_ts(dir);
+ exfat_truncate_inode_atime(dir);
if (IS_DIRSYNC(dir))
exfat_sync_inode(dir);
else
@@ -986,8 +987,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
clear_nlink(inode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
exfat_unhash_inode(inode);
exfat_d_version_set(dentry, inode_query_iversion(dir));
unlock:
@@ -1312,7 +1313,7 @@ static int exfat_rename(struct mnt_idmap *idmap,
inode_inc_iversion(new_dir);
simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
EXFAT_I(new_dir)->i_crtime = current_time(new_dir);
- exfat_truncate_atime(&new_dir->i_atime);
+ exfat_truncate_inode_atime(new_dir);
if (IS_DIRSYNC(new_dir))
exfat_sync_inode(new_dir);
else
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 2778bd9b631e..e919a68bf4a1 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -370,8 +370,8 @@ static int exfat_read_root(struct inode *inode)
ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
- inode->i_mtime = inode->i_atime = ei->i_crtime = inode_set_ctime_current(inode);
- exfat_truncate_atime(&inode->i_atime);
+ ei->i_crtime = simple_inode_init_ts(inode);
+ exfat_truncate_inode_atime(inode);
return 0;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index b335f17f682f..c7900868171b 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -468,7 +468,7 @@ int ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
ext2_set_de_type(de, inode);
ext2_commit_chunk(page, pos, len);
if (update_times)
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
return ext2_handle_dirsync(dir);
@@ -555,7 +555,7 @@ got_it:
de->inode = cpu_to_le32(inode->i_ino);
ext2_set_de_type (de, inode);
ext2_commit_chunk(page, pos, rec_len);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(dir);
err = ext2_handle_dirsync(dir);
@@ -606,7 +606,7 @@ int ext2_delete_entry(struct ext2_dir_entry_2 *dir, struct page *page)
pde->rec_len = ext2_rec_len_to_disk(to - from);
dir->inode = 0;
ext2_commit_chunk(page, pos, to - from);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
mark_inode_dirty(inode);
return ext2_handle_dirsync(inode);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c24d0de95a83..fdf63e9c6e7c 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -546,7 +546,7 @@ got:
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_flags =
ext2_mask_flags(mode, EXT2_I(dir)->i_flags & EXT2_FL_INHERITED);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 314b415ee518..464faf6c217e 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1291,7 +1291,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
__ext2_truncate_blocks(inode, newsize);
filemap_invalidate_unlock(inode->i_mapping);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (inode_needs_sync(inode)) {
sync_mapping_buffers(inode->i_mapping);
sync_inode_metadata(inode, 1);
@@ -1412,10 +1412,9 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
i_gid_write(inode, i_gid);
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le32_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+ inode_set_atime(inode, (signed)le32_to_cpu(raw_inode->i_atime), 0);
inode_set_ctime(inode, (signed)le32_to_cpu(raw_inode->i_ctime), 0);
- inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
+ inode_set_mtime(inode, (signed)le32_to_cpu(raw_inode->i_mtime), 0);
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
* This is needed because nfsd might try to access dead inodes
@@ -1544,9 +1543,9 @@ static int __ext2_write_inode(struct inode *inode, int do_sync)
}
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(inode->i_size);
- raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
- raw_inode->i_ctime = cpu_to_le32(inode_get_ctime(inode).tv_sec);
- raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
+ raw_inode->i_atime = cpu_to_le32(inode_get_atime_sec(inode));
+ raw_inode->i_ctime = cpu_to_le32(inode_get_ctime_sec(inode));
+ raw_inode->i_mtime = cpu_to_le32(inode_get_mtime_sec(inode));
raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index aaf3e3e88cb2..645ee6142f69 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1572,7 +1572,7 @@ out:
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode_inc_iversion(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 20f741184673..e849241ebb8f 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -98,7 +98,7 @@ static struct buffer_head *ext2_xattr_cache_find(struct inode *,
static void ext2_xattr_rehash(struct ext2_xattr_header *,
struct ext2_xattr_entry *);
-static const struct xattr_handler *ext2_xattr_handler_map[] = {
+static const struct xattr_handler * const ext2_xattr_handler_map[] = {
[EXT2_XATTR_INDEX_USER] = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
@@ -110,7 +110,7 @@ static const struct xattr_handler *ext2_xattr_handler_map[] = {
#endif
};
-const struct xattr_handler *ext2_xattr_handlers[] = {
+const struct xattr_handler * const ext2_xattr_handlers[] = {
&ext2_xattr_user_handler,
&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index 7925f596e8e2..6a4966949047 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -72,7 +72,7 @@ extern void ext2_xattr_delete_inode(struct inode *);
extern struct mb_cache *ext2_xattr_create_cache(void);
extern void ext2_xattr_destroy_cache(struct mb_cache *cache);
-extern const struct xattr_handler *ext2_xattr_handlers[];
+extern const struct xattr_handler * const ext2_xattr_handlers[];
# else /* CONFIG_EXT2_FS_XATTR */
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 453d4da5de52..7ae0b61258a7 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -232,19 +232,14 @@ static bool ext4_has_stable_inodes(struct super_block *sb)
return ext4_has_feature_stable_inodes(sb);
}
-static void ext4_get_ino_and_lblk_bits(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret)
-{
- *ino_bits_ret = 8 * sizeof(EXT4_SB(sb)->s_es->s_inodes_count);
- *lblk_bits_ret = 8 * sizeof(ext4_lblk_t);
-}
-
const struct fscrypt_operations ext4_cryptops = {
- .key_prefix = "ext4:",
+ .needs_bounce_pages = 1,
+ .has_32bit_inodes = 1,
+ .supports_subblock_data_units = 1,
+ .legacy_key_prefix = "ext4:",
.get_context = ext4_get_context,
.set_context = ext4_set_context,
.get_dummy_policy = ext4_get_dummy_policy,
.empty_dir = ext4_empty_dir,
.has_stable_inodes = ext4_has_stable_inodes,
- .get_ino_and_lblk_bits = ext4_get_ino_and_lblk_bits,
};
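The removed get_ino_and_lblk_bits() callback becomes declarative capability bits, so a filesystem advertises its limits up front. Hedged sketch of the shape:

    static const struct fscrypt_operations example_cryptops = {
    	.needs_bounce_pages		= 1,	/* contents I/O may use bounce pages */
    	.has_32bit_inodes		= 1,	/* inode numbers fit in 32 bits */
    	.supports_subblock_data_units	= 1,	/* allow DU size < block size */
    	.legacy_key_prefix		= "example:",
    	/* .get_context, .set_context, ... as before */
    };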
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 9418359b1d9d..8da5fb680210 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -891,10 +891,13 @@ do { \
(raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (ts).tv_sec, S32_MIN, S32_MAX)); \
} while (0)
-#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
- EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, (inode)->xtime)
+#define EXT4_INODE_SET_ATIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_atime, inode, raw_inode, inode_get_atime(inode))
-#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
+#define EXT4_INODE_SET_MTIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_mtime, inode, raw_inode, inode_get_mtime(inode))
+
+#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
EXT4_INODE_SET_XTIME_VAL(i_ctime, inode, raw_inode, inode_get_ctime(inode))
#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
@@ -910,9 +913,16 @@ do { \
.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime) \
})
-#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \
+#define EXT4_INODE_GET_ATIME(inode, raw_inode) \
+do { \
+ inode_set_atime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_atime, inode, raw_inode)); \
+} while (0)
+
+#define EXT4_INODE_GET_MTIME(inode, raw_inode) \
do { \
- (inode)->xtime = EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode); \
+ inode_set_mtime_to_ts(inode, \
+ EXT4_INODE_GET_XTIME_VAL(i_mtime, inode, raw_inode)); \
} while (0)
#define EXT4_INODE_GET_CTIME(inode, raw_inode) \
@@ -1537,7 +1547,7 @@ struct ext4_sb_info {
unsigned long s_commit_interval;
u32 s_max_batch_time;
u32 s_min_batch_time;
- struct block_device *s_journal_bdev;
+ struct bdev_handle *s_journal_bdev_handle;
#ifdef CONFIG_QUOTA
/* Names of quota files with journalled quota */
char __rcu *s_qf_names[EXT4_MAXQUOTAS];
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 202c76996b62..4c4176ee1749 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4481,7 +4481,8 @@ retry:
if (epos > new_size)
epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1)
- inode->i_mtime = inode_get_ctime(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_get_ctime(inode));
}
ret2 = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4617,7 +4618,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
/* Now release the pages and zero block aligned part of pages */
truncate_pagecache_range(inode, start, end - 1);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
flags);
@@ -4642,7 +4643,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
goto out_mutex;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (new_size)
ext4_update_inode_size(inode, new_size);
ret = ext4_mark_inode_dirty(handle, inode);
@@ -5378,7 +5379,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -5488,7 +5489,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
EXT4_I(inode)->i_disksize += len;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ext4_mark_inode_dirty(handle, inode);
if (ret)
goto out_stop;
diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c
index cdf9bfe10137..11e6f33677a2 100644
--- a/fs/ext4/fsmap.c
+++ b/fs/ext4/fsmap.c
@@ -576,8 +576,9 @@ static bool ext4_getfsmap_is_valid_device(struct super_block *sb,
if (fm->fmr_device == 0 || fm->fmr_device == UINT_MAX ||
fm->fmr_device == new_encode_dev(sb->s_bdev->bd_dev))
return true;
- if (EXT4_SB(sb)->s_journal_bdev &&
- fm->fmr_device == new_encode_dev(EXT4_SB(sb)->s_journal_bdev->bd_dev))
+ if (EXT4_SB(sb)->s_journal_bdev_handle &&
+ fm->fmr_device ==
+ new_encode_dev(EXT4_SB(sb)->s_journal_bdev_handle->bdev->bd_dev))
return true;
return false;
}
@@ -647,9 +648,9 @@ int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,
memset(handlers, 0, sizeof(handlers));
handlers[0].gfd_dev = new_encode_dev(sb->s_bdev->bd_dev);
handlers[0].gfd_fn = ext4_getfsmap_datadev;
- if (EXT4_SB(sb)->s_journal_bdev) {
+ if (EXT4_SB(sb)->s_journal_bdev_handle) {
handlers[1].gfd_dev = new_encode_dev(
- EXT4_SB(sb)->s_journal_bdev->bd_dev);
+ EXT4_SB(sb)->s_journal_bdev_handle->bdev->bd_dev);
handlers[1].gfd_fn = ext4_getfsmap_logdev;
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index b65058d972f9..e9bbb1da2d0a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1250,8 +1250,8 @@ got:
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- ei->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ ei->i_crtime = inode_get_mtime(inode);
memset(ei->i_data, 0, sizeof(ei->i_data));
ei->i_dir_start_lookup = 0;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 012d9259ff53..9a84a5f9fef4 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1037,7 +1037,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
return 1;
@@ -1991,7 +1991,7 @@ out:
ext4_orphan_del(handle, inode);
if (err == 0) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err = ext4_mark_inode_dirty(handle, inode);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4ce35f1c8b0a..08cb5c0e0d51 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4020,7 +4020,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(ret2))
ret = ret2;
@@ -4180,7 +4180,7 @@ out_stop:
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err2 = ext4_mark_inode_dirty(handle, inode);
if (unlikely(err2 && !err))
err = err2;
@@ -4284,8 +4284,8 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_CTIME(inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_SET_MTIME(inode, raw_inode);
+ EXT4_INODE_SET_ATIME(inode, raw_inode);
EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
@@ -4893,8 +4893,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
}
EXT4_INODE_GET_CTIME(inode, raw_inode);
- EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_GET_ATIME(inode, raw_inode);
+ EXT4_INODE_GET_MTIME(inode, raw_inode);
EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
@@ -5019,8 +5019,8 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
spin_lock(&ei->i_raw_lock);
EXT4_INODE_SET_CTIME(inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ EXT4_INODE_SET_MTIME(inode, raw_inode);
+ EXT4_INODE_SET_ATIME(inode, raw_inode);
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
trace_ext4_other_inode_update_time(inode, orig_ino);
@@ -5413,7 +5413,8 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
* update c/mtime in shrink case below
*/
if (!shrink)
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
if (shrink)
ext4_fc_track_range(handle, inode,
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0bfe2ce589e2..4f931f80cb34 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -312,13 +312,22 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
struct ext4_inode_info *ei1;
struct ext4_inode_info *ei2;
unsigned long tmp;
+ struct timespec64 ts1, ts2;
ei1 = EXT4_I(inode1);
ei2 = EXT4_I(inode2);
swap(inode1->i_version, inode2->i_version);
- swap(inode1->i_atime, inode2->i_atime);
- swap(inode1->i_mtime, inode2->i_mtime);
+
+ ts1 = inode_get_atime(inode1);
+ ts2 = inode_get_atime(inode2);
+ inode_set_atime_to_ts(inode1, ts2);
+ inode_set_atime_to_ts(inode2, ts1);
+
+ ts1 = inode_get_mtime(inode1);
+ ts2 = inode_get_mtime(inode2);
+ inode_set_mtime_to_ts(inode1, ts2);
+ inode_set_mtime_to_ts(inode2, ts1);
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
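swap(inode1->i_atime, ...) no longer compiles behind the accessors, hence the open-coded exchange above; it could equally be factored into a helper (hypothetical, not in the patch):

    static void swap_inode_atime(struct inode *a, struct inode *b)
    {
    	struct timespec64 ta = inode_get_atime(a);
    	struct timespec64 tb = inode_get_atime(b);

    	inode_set_atime_to_ts(a, tb);
    	inode_set_atime_to_ts(b, ta);
    }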
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bbda587f76b8..057d74467293 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2207,7 +2207,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
* happen is that the times are slightly out of date
* and/or different from the directory change time.
*/
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
err2 = ext4_mark_inode_dirty(handle, dir);
@@ -3202,7 +3202,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
* recovery. */
inode->i_size = 0;
ext4_orphan_add(handle, inode);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_set_ctime_current(inode);
retval = ext4_mark_inode_dirty(handle, inode);
if (retval)
@@ -3277,7 +3277,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
retval = ext4_delete_entry(handle, dir, de, bh);
if (retval)
goto out_handle;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
ext4_update_dx_flag(dir);
retval = ext4_mark_inode_dirty(handle, dir);
if (retval)
@@ -3648,7 +3648,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
if (ext4_has_feature_filetype(ent->dir->i_sb))
ent->de->file_type = file_type;
inode_inc_iversion(ent->dir);
- ent->dir->i_mtime = inode_set_ctime_current(ent->dir);
+ inode_set_mtime_to_ts(ent->dir, inode_set_ctime_current(ent->dir));
retval = ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
@@ -3963,7 +3963,7 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
ext4_dec_count(new.inode);
inode_set_ctime_current(new.inode);
}
- old.dir->i_mtime = inode_set_ctime_current(old.dir);
+ inode_set_mtime_to_ts(old.dir, inode_set_ctime_current(old.dir));
ext4_update_dx_flag(old.dir);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index dbebd8b3127e..42a44990d99c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1351,14 +1351,14 @@ static void ext4_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
invalidate_bdev(sb->s_bdev);
- if (sbi->s_journal_bdev) {
+ if (sbi->s_journal_bdev_handle) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
* be hotswapped, and it breaks the `ro-after' testing code.
*/
- sync_blockdev(sbi->s_journal_bdev);
- invalidate_bdev(sbi->s_journal_bdev);
+ sync_blockdev(sbi->s_journal_bdev_handle->bdev);
+ invalidate_bdev(sbi->s_journal_bdev_handle->bdev);
}
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
@@ -4233,7 +4233,7 @@ int ext4_calculate_overhead(struct super_block *sb)
* Add the internal journal blocks whether the journal has been
* loaded or not
*/
- if (sbi->s_journal && !sbi->s_journal_bdev)
+ if (sbi->s_journal && !sbi->s_journal_bdev_handle)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_total_len);
else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
/* j_inum for internal journal is non-zero */
@@ -5670,9 +5670,9 @@ failed_mount:
#endif
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
brelse(sbi->s_sbh);
- if (sbi->s_journal_bdev) {
- invalidate_bdev(sbi->s_journal_bdev);
- blkdev_put(sbi->s_journal_bdev, sb);
+ if (sbi->s_journal_bdev_handle) {
+ invalidate_bdev(sbi->s_journal_bdev_handle->bdev);
+ bdev_release(sbi->s_journal_bdev_handle);
}
out_fail:
invalidate_bdev(sb->s_bdev);
@@ -5842,12 +5842,13 @@ static journal_t *ext4_open_inode_journal(struct super_block *sb,
return journal;
}
-static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
+static struct bdev_handle *ext4_get_journal_blkdev(struct super_block *sb,
dev_t j_dev, ext4_fsblk_t *j_start,
ext4_fsblk_t *j_len)
{
struct buffer_head *bh;
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int hblock, blocksize;
ext4_fsblk_t sb_block;
unsigned long offset;
@@ -5856,16 +5857,17 @@ static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
/* see get_tree_bdev() for why this is needed and safe */
up_write(&sb->s_umount);
- bdev = blkdev_get_by_dev(j_dev, BLK_OPEN_READ | BLK_OPEN_WRITE, sb,
- &fs_holder_ops);
+ bdev_handle = bdev_open_by_dev(j_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ sb, &fs_holder_ops);
down_write(&sb->s_umount);
- if (IS_ERR(bdev)) {
+ if (IS_ERR(bdev_handle)) {
ext4_msg(sb, KERN_ERR,
"failed to open journal device unknown-block(%u,%u) %ld",
- MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev));
- return ERR_CAST(bdev);
+ MAJOR(j_dev), MINOR(j_dev), PTR_ERR(bdev_handle));
+ return bdev_handle;
}
+ bdev = bdev_handle->bdev;
blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {
@@ -5912,12 +5914,12 @@ static struct block_device *ext4_get_journal_blkdev(struct super_block *sb,
*j_start = sb_block + 1;
*j_len = ext4_blocks_count(es);
brelse(bh);
- return bdev;
+ return bdev_handle;
out_bh:
brelse(bh);
out_bdev:
- blkdev_put(bdev, sb);
+ bdev_release(bdev_handle);
return ERR_PTR(errno);
}
@@ -5927,14 +5929,14 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
journal_t *journal;
ext4_fsblk_t j_start;
ext4_fsblk_t j_len;
- struct block_device *journal_bdev;
+ struct bdev_handle *bdev_handle;
int errno = 0;
- journal_bdev = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
- if (IS_ERR(journal_bdev))
- return ERR_CAST(journal_bdev);
+ bdev_handle = ext4_get_journal_blkdev(sb, j_dev, &j_start, &j_len);
+ if (IS_ERR(bdev_handle))
+ return ERR_CAST(bdev_handle);
- journal = jbd2_journal_init_dev(journal_bdev, sb->s_bdev, j_start,
+ journal = jbd2_journal_init_dev(bdev_handle->bdev, sb->s_bdev, j_start,
j_len, sb->s_blocksize);
if (IS_ERR(journal)) {
ext4_msg(sb, KERN_ERR, "failed to create device journal");
@@ -5949,14 +5951,14 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
goto out_journal;
}
journal->j_private = sb;
- EXT4_SB(sb)->s_journal_bdev = journal_bdev;
+ EXT4_SB(sb)->s_journal_bdev_handle = bdev_handle;
ext4_init_journal_params(sb, journal);
return journal;
out_journal:
jbd2_journal_destroy(journal);
out_bdev:
- blkdev_put(journal_bdev, sb);
+ bdev_release(bdev_handle);
return ERR_PTR(errno);
}
@@ -7127,7 +7129,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
}
EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out_unlock:
@@ -7300,12 +7302,12 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
static void ext4_kill_sb(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct block_device *journal_bdev = sbi ? sbi->s_journal_bdev : NULL;
+ struct bdev_handle *handle = sbi ? sbi->s_journal_bdev_handle : NULL;
kill_block_super(sb);
- if (journal_bdev)
- blkdev_put(journal_bdev, sb);
+ if (handle)
+ bdev_release(handle);
}
static struct file_system_type ext4_fs_type = {
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 92ba28cebac6..82dc5e673d5c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -98,7 +98,7 @@ static const struct xattr_handler * const ext4_xattr_handler_map[] = {
[EXT4_XATTR_INDEX_HURD] = &ext4_xattr_hurd_handler,
};
-const struct xattr_handler *ext4_xattr_handlers[] = {
+const struct xattr_handler * const ext4_xattr_handlers[] = {
&ext4_xattr_user_handler,
&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
@@ -356,7 +356,7 @@ ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
- return ((u64) inode_get_ctime(ea_inode).tv_sec << 32) |
+ return ((u64) inode_get_ctime_sec(ea_inode) << 32) |
(u32) inode_peek_iversion_raw(ea_inode);
}
@@ -368,12 +368,12 @@ static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
- return (u32)ea_inode->i_atime.tv_sec;
+ return (u32) inode_get_atime_sec(ea_inode);
}
static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
- ea_inode->i_atime.tv_sec = hash;
+ inode_set_atime(ea_inode, hash, 0);
}
/*
@@ -418,7 +418,7 @@ free_bhs:
return ret;
}
-#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
+#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode_get_mtime_sec(inode)))
static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
u32 ea_inode_hash, struct inode **ea_inode)
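For orientation, the ea_inode overloads its timestamps: the reference count packs into ctime seconds (high 32 bits) plus the raw i_version (low 32 bits), the xattr hash into atime seconds, and the parent inode number into mtime seconds. Unpacking sketch:

    u64 ref    = ((u64)inode_get_ctime_sec(ea_inode) << 32) |
    		 (u32)inode_peek_iversion_raw(ea_inode);
    u32 hash   = (u32)inode_get_atime_sec(ea_inode);
    u32 parent = (u32)inode_get_mtime_sec(ea_inode);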
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 824faf0b15a8..bd97c4aa8177 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -193,7 +193,7 @@ extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
extern void ext4_evict_ea_inode(struct inode *inode);
-extern const struct xattr_handler *ext4_xattr_handlers[];
+extern const struct xattr_handler * const ext4_xattr_handlers[];
extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
struct ext4_xattr_ibody_find *is);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 8aa29fe2e87b..042593aed1ec 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -455,7 +455,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
de->file_type = fs_umode_to_ftype(inode->i_mode);
set_page_dirty(page);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
@@ -609,7 +609,7 @@ void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
f2fs_i_links_write(dir, true);
clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (F2FS_I(dir)->i_current_depth != current_depth)
@@ -919,7 +919,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
}
f2fs_put_page(page, 1);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 6d688e42d89c..9043cedfa12b 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1234,6 +1234,7 @@ struct f2fs_bio_info {
#define FDEV(i) (sbi->devs[i])
#define RDEV(i) (raw_super->devs[i])
struct f2fs_dev_info {
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
char path[MAX_PATH_LEN];
unsigned int total_segments;
@@ -3317,13 +3318,15 @@ static inline void clear_file(struct inode *inode, int type)
static inline bool f2fs_is_time_consistent(struct inode *inode)
{
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts = inode_get_atime(inode);
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ctime))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts))
return false;
- if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts))
return false;
return true;
}
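The temporaries exist because the inode_get_*time() accessors return a struct timespec64 by value, while timespec64_equal() takes pointers; the old code could take the address of the inode field directly. The idiom in isolation:

struct timespec64 ts = inode_get_atime(inode);

/* &inode->i_atime is gone; compare through the local instead */
if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts))
	return false;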
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ca5904129b16..dd99abbb7186 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -798,7 +798,7 @@ int f2fs_truncate(struct inode *inode)
if (err)
return err;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
@@ -905,9 +905,9 @@ static void __setattr_copy(struct mnt_idmap *idmap,
i_uid_update(idmap, attr, inode);
i_gid_update(idmap, attr, inode);
if (ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (ia_valid & ATTR_MODE) {
@@ -1012,7 +1012,7 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
return err;
spin_lock(&F2FS_I(inode)->i_size_lock);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
F2FS_I(inode)->last_disk_size = i_size_read(inode);
spin_unlock(&F2FS_I(inode)->i_size_lock);
}
@@ -1840,7 +1840,7 @@ static long f2fs_fallocate(struct file *file, int mode,
}
if (!ret) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -2888,10 +2888,10 @@ out_src:
if (ret)
goto out_unlock;
- src->i_mtime = inode_set_ctime_current(src);
+ inode_set_mtime_to_ts(src, inode_set_ctime_current(src));
f2fs_mark_inode_dirty_sync(src, false);
if (src != dst) {
- dst->i_mtime = inode_set_ctime_current(dst);
+ inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst));
f2fs_mark_inode_dirty_sync(dst, false);
}
f2fs_update_time(sbi, REQ_TIME);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 2fe25619ccb5..ac00423f117b 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -699,7 +699,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
f2fs_put_page(page, 1);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index cde243840abd..5779c7edd49b 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -386,9 +386,9 @@ static void init_idisk_time(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- fi->i_disk_time[0] = inode->i_atime;
+ fi->i_disk_time[0] = inode_get_atime(inode);
fi->i_disk_time[1] = inode_get_ctime(inode);
- fi->i_disk_time[2] = inode->i_mtime;
+ fi->i_disk_time[2] = inode_get_mtime(inode);
}
static int do_read_inode(struct inode *inode)
@@ -417,12 +417,12 @@ static int do_read_inode(struct inode *inode)
inode->i_size = le64_to_cpu(ri->i_size);
inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);
- inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
+ inode_set_atime(inode, le64_to_cpu(ri->i_atime),
+ le32_to_cpu(ri->i_atime_nsec));
inode_set_ctime(inode, le64_to_cpu(ri->i_ctime),
le32_to_cpu(ri->i_ctime_nsec));
- inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(ri->i_mtime),
+ le32_to_cpu(ri->i_mtime_nsec));
inode->i_generation = le32_to_cpu(ri->i_generation);
if (S_ISDIR(inode->i_mode))
fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
@@ -698,12 +698,12 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
}
set_raw_inline(inode, ri);
- ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- ri->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ ri->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ ri->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ ri->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ ri->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ ri->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
if (S_ISDIR(inode->i_mode))
ri->i_current_depth =
cpu_to_le32(F2FS_I(inode)->i_current_depth);
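After the conversion, a timestamp round-trips between the in-core inode and the little-endian on-disk inode entirely through accessors; the sec/nsec pair is never touched by field name. For mtime, condensed from the two hunks above:

/* read side: on-disk (LE) -> in-core */
inode_set_mtime(inode, le64_to_cpu(ri->i_mtime),
		le32_to_cpu(ri->i_mtime_nsec));

/* write side: in-core -> on-disk (LE) */
ri->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
ri->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));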
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 193b22a2d6bf..d0053b0284d8 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -243,8 +243,8 @@ static struct inode *f2fs_new_inode(struct mnt_idmap *idmap,
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- F2FS_I(inode)->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ F2FS_I(inode)->i_crtime = inode_get_mtime(inode);
inode->i_generation = get_random_u32();
if (S_ISDIR(inode->i_mode))
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 7be60df277a5..b56d0f1078a7 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -320,12 +320,12 @@ static int recover_inode(struct inode *inode, struct page *page)
}
f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
+ inode_set_atime(inode, le64_to_cpu(raw->i_atime),
+ le32_to_cpu(raw->i_atime_nsec));
inode_set_ctime(inode, le64_to_cpu(raw->i_ctime),
le32_to_cpu(raw->i_ctime_nsec));
- inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(raw->i_mtime),
+ le32_to_cpu(raw->i_mtime_nsec));
F2FS_I(inode)->i_advise = raw->i_advise;
F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a8c8232852bb..be17d77513d5 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1562,7 +1562,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
for (i = 0; i < sbi->s_ndevs; i++) {
if (i > 0)
- blkdev_put(FDEV(i).bdev, sbi->sb);
+ bdev_release(FDEV(i).bdev_handle);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@@ -2710,7 +2710,7 @@ retry:
if (len == towrite)
return err;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
f2fs_mark_inode_dirty_sync(inode, false);
return len - towrite;
}
@@ -3203,13 +3203,6 @@ static bool f2fs_has_stable_inodes(struct super_block *sb)
return true;
}
-static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret)
-{
- *ino_bits_ret = 8 * sizeof(nid_t);
- *lblk_bits_ret = 8 * sizeof(block_t);
-}
-
static struct block_device **f2fs_get_devices(struct super_block *sb,
unsigned int *num_devs)
{
@@ -3231,13 +3224,15 @@ static struct block_device **f2fs_get_devices(struct super_block *sb,
}
static const struct fscrypt_operations f2fs_cryptops = {
- .key_prefix = "f2fs:",
+ .needs_bounce_pages = 1,
+ .has_32bit_inodes = 1,
+ .supports_subblock_data_units = 1,
+ .legacy_key_prefix = "f2fs:",
.get_context = f2fs_get_context,
.set_context = f2fs_set_context,
.get_dummy_policy = f2fs_get_dummy_policy,
.empty_dir = f2fs_empty_dir,
.has_stable_inodes = f2fs_has_stable_inodes,
- .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
.get_devices = f2fs_get_devices,
};
#endif
@@ -4198,7 +4193,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
for (i = 0; i < max_devices; i++) {
if (i == 0)
- FDEV(0).bdev = sbi->sb->s_bdev;
+ FDEV(0).bdev_handle = sbi->sb->s_bdev_handle;
else if (!RDEV(i).path[0])
break;
@@ -4218,13 +4213,14 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1;
- FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
- mode, sbi->sb, NULL);
+ FDEV(i).bdev_handle = bdev_open_by_path(
+ FDEV(i).path, mode, sbi->sb, NULL);
}
}
- if (IS_ERR(FDEV(i).bdev))
- return PTR_ERR(FDEV(i).bdev);
+ if (IS_ERR(FDEV(i).bdev_handle))
+ return PTR_ERR(FDEV(i).bdev_handle);
+ FDEV(i).bdev = FDEV(i).bdev_handle->bdev;
/* to release errored devices */
sbi->s_ndevs = i + 1;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index a657284faee3..4314456854f6 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -189,7 +189,7 @@ const struct xattr_handler f2fs_xattr_security_handler = {
.set = f2fs_xattr_generic_set,
};
-static const struct xattr_handler *f2fs_xattr_handler_map[] = {
+static const struct xattr_handler * const f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
[F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
@@ -202,7 +202,7 @@ static const struct xattr_handler *f2fs_xattr_handler_map[] = {
[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};
-const struct xattr_handler *f2fs_xattr_handlers[] = {
+const struct xattr_handler * const f2fs_xattr_handlers[] = {
&f2fs_xattr_user_handler,
&f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index b1811c392e6f..a005ffdcf717 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -125,7 +125,7 @@ extern const struct xattr_handler f2fs_xattr_trusted_handler;
extern const struct xattr_handler f2fs_xattr_advise_handler;
extern const struct xattr_handler f2fs_xattr_security_handler;
-extern const struct xattr_handler *f2fs_xattr_handlers[];
+extern const struct xattr_handler * const f2fs_xattr_handlers[];
extern int f2fs_setxattr(struct inode *, int, const char *,
const void *, size_t, struct page *, int);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cdd39b6020f3..1fac3dabf130 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -512,6 +512,7 @@ static int fat_validate_dir(struct inode *dir)
int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ struct timespec64 mtime;
int error;
MSDOS_I(inode)->i_pos = 0;
@@ -561,14 +562,18 @@ int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_blocks = ((inode->i_size + (sbi->cluster_size - 1))
& ~((loff_t)sbi->cluster_size - 1)) >> 9;
- fat_time_fat2unix(sbi, &inode->i_mtime, de->time, de->date, 0);
- inode_set_ctime_to_ts(inode, inode->i_mtime);
+ fat_time_fat2unix(sbi, &mtime, de->time, de->date, 0);
+ inode_set_mtime_to_ts(inode, mtime);
+ inode_set_ctime_to_ts(inode, mtime);
if (sbi->options.isvfat) {
- fat_time_fat2unix(sbi, &inode->i_atime, 0, de->adate, 0);
+ struct timespec64 atime;
+
+ fat_time_fat2unix(sbi, &atime, 0, de->adate, 0);
+ inode_set_atime_to_ts(inode, atime);
fat_time_fat2unix(sbi, &MSDOS_I(inode)->i_crtime, de->ctime,
de->cdate, de->ctime_cs);
} else
- inode->i_atime = fat_truncate_atime(sbi, &inode->i_mtime);
+ inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, &mtime));
return 0;
}
@@ -849,6 +854,7 @@ static int __fat_write_inode(struct inode *inode, int wait)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct msdos_dir_entry *raw_entry;
+ struct timespec64 mtime;
loff_t i_pos;
sector_t blocknr;
int err, offset;
@@ -882,12 +888,14 @@ retry:
raw_entry->size = cpu_to_le32(inode->i_size);
raw_entry->attr = fat_make_attrs(inode);
fat_set_start(raw_entry, MSDOS_I(inode)->i_logstart);
- fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
+ mtime = inode_get_mtime(inode);
+ fat_time_unix2fat(sbi, &mtime, &raw_entry->time,
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
+ struct timespec64 ts = inode_get_atime(inode);
__le16 atime;
- fat_time_unix2fat(sbi, &inode->i_atime, &atime,
- &raw_entry->adate, NULL);
+
+ fat_time_unix2fat(sbi, &ts, &atime, &raw_entry->adate, NULL);
fat_time_unix2fat(sbi, &MSDOS_I(inode)->i_crtime, &raw_entry->ctime,
&raw_entry->cdate, &raw_entry->ctime_cs);
}
@@ -1407,7 +1415,8 @@ static int fat_read_root(struct inode *inode)
MSDOS_I(inode)->mmu_private = inode->i_size;
fat_save_attrs(inode, ATTR_DIR);
- inode->i_mtime = inode->i_atime = inode_set_ctime(inode, 0, 0);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, 0, 0)));
set_nlink(inode, fat_subdirs(inode)+2);
return 0;
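The nesting works because every inode_set_*time*() helper returns the timespec64 it stored, so one value can fan out to all three timestamps in a single expression. Spelled out, the equivalent is:

struct timespec64 ts = inode_set_ctime(inode, 0, 0);

inode_set_atime_to_ts(inode, ts);
inode_set_mtime_to_ts(inode, ts);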
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index f2304a1054aa..c7a2d27120ba 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -325,15 +325,15 @@ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags)
}
if (flags & S_ATIME)
- inode->i_atime = fat_truncate_atime(sbi, now);
+ inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, now));
/*
* ctime and mtime share the same on-disk field, and should be
* identical in memory. all mtime updates will be applied to ctime,
* but ctime updates are ignored.
*/
if (flags & S_MTIME)
- inode->i_mtime = inode_set_ctime_to_ts(inode,
- fat_truncate_mtime(sbi, now));
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, fat_truncate_mtime(sbi, now)));
return 0;
}
diff --git a/fs/file.c b/fs/file.c
index 3e4a4dfa38fc..5fb0b146e79e 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -604,6 +604,9 @@ void fd_install(unsigned int fd, struct file *file)
struct files_struct *files = current->files;
struct fdtable *fdt;
+ if (WARN_ON_ONCE(unlikely(file->f_mode & FMODE_BACKING)))
+ return;
+
rcu_read_lock_sched();
if (unlikely(files->resize_in_progress)) {
@@ -853,8 +856,104 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
+static struct file *__get_file_rcu(struct file __rcu **f)
+{
+ struct file __rcu *file;
+ struct file __rcu *file_reloaded;
+ struct file __rcu *file_reloaded_cmp;
+
+ file = rcu_dereference_raw(*f);
+ if (!file)
+ return NULL;
+
+ if (unlikely(!atomic_long_inc_not_zero(&file->f_count)))
+ return ERR_PTR(-EAGAIN);
+
+ file_reloaded = rcu_dereference_raw(*f);
+
+ /*
+ * Ensure that all accesses have a dependency on the load from
+ * rcu_dereference_raw() above so we get correct ordering
+ * between reuse/allocation and the pointer check below.
+ */
+ file_reloaded_cmp = file_reloaded;
+ OPTIMIZER_HIDE_VAR(file_reloaded_cmp);
+
+ /*
+ * atomic_long_inc_not_zero() above provided a full memory
+ * barrier when we acquired a reference.
+ *
+ * This is paired with the write barrier from assigning to the
+ * __rcu protected file pointer so that if that pointer still
+ * matches the current file, we know we have successfully
+ * acquired a reference to the right file.
+ *
+ * If the pointers don't match the file has been reallocated by
+ * SLAB_TYPESAFE_BY_RCU.
+ */
+ if (file == file_reloaded_cmp)
+ return file_reloaded;
+
+ fput(file);
+ return ERR_PTR(-EAGAIN);
+}
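Stripped of the file specifics, this is the canonical lookup against an SLAB_TYPESAFE_BY_RCU cache: load the slot, try to take a reference, then reload the slot to prove the object wasn't freed and recycled in between. A generic sketch (struct obj, its refcnt and obj_put() are hypothetical):

static struct obj *obj_tryget_rcu(struct obj __rcu **slot)
{
	struct obj *o = rcu_dereference_raw(*slot);

	if (!o)
		return NULL;

	/* refcount already zero: the object is on its way out */
	if (!atomic_long_inc_not_zero(&o->refcnt))
		return ERR_PTR(-EAGAIN);

	/* recheck: a recycled object no longer sits in this slot */
	if (o != rcu_dereference_raw(*slot)) {
		obj_put(o);
		return ERR_PTR(-EAGAIN);
	}
	return o;
}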
+
+/**
+ * get_file_rcu - try to get a reference to a file under rcu
+ * @f: the file to get a reference on
+ *
+ * This function tries to get a reference on @f, carefully verifying that
+ * @f hasn't been reused.
+ *
+ * This function should rarely have to be used and only by users who
+ * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
+ *
+ * Return: Returns @f with the reference count increased or NULL.
+ */
+struct file *get_file_rcu(struct file __rcu **f)
+{
+ for (;;) {
+ struct file __rcu *file;
+
+ file = __get_file_rcu(f);
+ if (unlikely(!file))
+ return NULL;
+
+ if (unlikely(IS_ERR(file)))
+ continue;
+
+ return file;
+ }
+}
+EXPORT_SYMBOL_GPL(get_file_rcu);
+
+/**
+ * get_file_active - try to get a reference to a file
+ * @f: the file to get a reference on
+ *
+ * In contrast to get_file_rcu(), the pointer itself isn't part of the
+ * reference counting.
+ *
+ * This function should rarely have to be used and only by users who
+ * understand the implications of SLAB_TYPESAFE_BY_RCU. Try to avoid it.
+ *
+ * Return: Returns @f with the reference count increased or NULL.
+ */
+struct file *get_file_active(struct file **f)
+{
+ struct file __rcu *file;
+
+ rcu_read_lock();
+ file = __get_file_rcu(f);
+ rcu_read_unlock();
+ if (IS_ERR(file))
+ file = NULL;
+ return file;
+}
+EXPORT_SYMBOL_GPL(get_file_active);
+
static inline struct file *__fget_files_rcu(struct files_struct *files,
- unsigned int fd, fmode_t mask)
+ unsigned int fd, fmode_t mask)
{
for (;;) {
struct file *file;
@@ -865,12 +964,6 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
return NULL;
fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
- file = rcu_dereference_raw(*fdentry);
- if (unlikely(!file))
- return NULL;
-
- if (unlikely(file->f_mode & mask))
- return NULL;
/*
* Ok, we have a file pointer. However, because we do
@@ -879,10 +972,15 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
*
* Such a race can take two forms:
*
- * (a) the file ref already went down to zero,
- * and get_file_rcu() fails. Just try again:
+ * (a) the file ref already went down to zero and the
+ * file hasn't been reused yet, or the file count
+ * isn't zero but the file has already been reused.
*/
- if (unlikely(!get_file_rcu(file)))
+ file = __get_file_rcu(fdentry);
+ if (unlikely(!file))
+ return NULL;
+
+ if (unlikely(IS_ERR(file)))
continue;
/*
@@ -893,13 +991,21 @@ static inline struct file *__fget_files_rcu(struct files_struct *files,
*
* If so, we need to put our ref and try again.
*/
- if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
- unlikely(rcu_dereference_raw(*fdentry) != file)) {
+ if (unlikely(rcu_dereference_raw(files->fdt) != fdt)) {
fput(file);
continue;
}
/*
+ * This isn't the file we're looking for or we're not
+ * allowed to get a reference to it.
+ */
+ if (unlikely(file->f_mode & mask)) {
+ fput(file);
+ return NULL;
+ }
+
+ /*
* Ok, we have a ref to the file, and checked that it
* still exists.
*/
@@ -948,7 +1054,14 @@ struct file *fget_task(struct task_struct *task, unsigned int fd)
return file;
}
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
+struct file *lookup_fdget_rcu(unsigned int fd)
+{
+ return __fget_files_rcu(current->files, fd, 0);
+}
+EXPORT_SYMBOL_GPL(lookup_fdget_rcu);
+
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd)
{
/* Must be called with rcu_read_lock held */
struct files_struct *files;
@@ -957,13 +1070,13 @@ struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd)
task_lock(task);
files = task->files;
if (files)
- file = files_lookup_fd_rcu(files, fd);
+ file = __fget_files_rcu(files, fd, 0);
task_unlock(task);
return file;
}
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret_fd)
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *ret_fd)
{
/* Must be called with rcu_read_lock held */
struct files_struct *files;
@@ -974,7 +1087,7 @@ struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret
files = task->files;
if (files) {
for (; fd < files_fdtable(files)->max_fds; fd++) {
- file = files_lookup_fd_rcu(files, fd);
+ file = __fget_files_rcu(files, fd, 0);
if (file)
break;
}
@@ -983,7 +1096,7 @@ struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *ret
*ret_fd = fd;
return file;
}
-EXPORT_SYMBOL(task_lookup_next_fd_rcu);
+EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
/*
* Lightweight file lookup - no refcnt increment if fd table isn't shared.
@@ -1272,12 +1385,16 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
if (unlikely(newfd == oldfd)) { /* corner case */
struct files_struct *files = current->files;
+ struct file *f;
int retval = oldfd;
rcu_read_lock();
- if (!files_lookup_fd_rcu(files, oldfd))
+ f = __fget_files_rcu(files, oldfd, 0);
+ if (!f)
retval = -EBADF;
rcu_read_unlock();
+ if (f)
+ fput(f);
return retval;
}
return ksys_dup3(oldfd, newfd, 0);
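Unlike the old files_lookup_fd_rcu(), the new *_fdget_rcu() helpers hand back a counted reference, so callers pair them with fput() rather than relying on the RCU read side to keep the file alive; the dup2() corner case above shows the minimal form. As a pattern:

struct file *file;

rcu_read_lock();
file = lookup_fdget_rcu(fd);	/* NULL or a counted reference */
rcu_read_unlock();

if (file) {
	/* ... inspect file ... */
	fput(file);
}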
diff --git a/fs/file_table.c b/fs/file_table.c
index ee21b3da9d08..fa92743ba6a9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -44,10 +44,10 @@ static struct kmem_cache *filp_cachep __read_mostly;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-/* Container for backing file with optional real path */
+/* Container for backing file with optional user path */
struct backing_file {
struct file file;
- struct path real_path;
+ struct path user_path;
};
static inline struct backing_file *backing_file(struct file *f)
@@ -55,31 +55,36 @@ static inline struct backing_file *backing_file(struct file *f)
return container_of(f, struct backing_file, file);
}
-struct path *backing_file_real_path(struct file *f)
+struct path *backing_file_user_path(struct file *f)
{
- return &backing_file(f)->real_path;
+ return &backing_file(f)->user_path;
}
-EXPORT_SYMBOL_GPL(backing_file_real_path);
+EXPORT_SYMBOL_GPL(backing_file_user_path);
-static void file_free_rcu(struct rcu_head *head)
+static inline void file_free(struct file *f)
{
- struct file *f = container_of(head, struct file, f_rcuhead);
-
+ security_file_free(f);
+ if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
+ percpu_counter_dec(&nr_files);
put_cred(f->f_cred);
- if (unlikely(f->f_mode & FMODE_BACKING))
+ if (unlikely(f->f_mode & FMODE_BACKING)) {
+ path_put(backing_file_user_path(f));
kfree(backing_file(f));
- else
+ } else {
kmem_cache_free(filp_cachep, f);
+ }
}
-static inline void file_free(struct file *f)
+void release_empty_file(struct file *f)
{
- security_file_free(f);
- if (unlikely(f->f_mode & FMODE_BACKING))
- path_put(backing_file_real_path(f));
- if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
- percpu_counter_dec(&nr_files);
- call_rcu(&f->f_rcuhead, file_free_rcu);
+ WARN_ON_ONCE(f->f_mode & (FMODE_BACKING | FMODE_OPENED));
+ if (atomic_long_dec_and_test(&f->f_count)) {
+ security_file_free(f);
+ put_cred(f->f_cred);
+ if (likely(!(f->f_mode & FMODE_NOACCOUNT)))
+ percpu_counter_dec(&nr_files);
+ kmem_cache_free(filp_cachep, f);
+ }
}
/*
@@ -164,7 +169,6 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
return error;
}
- atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
mutex_init(&f->f_pos_lock);
@@ -172,6 +176,12 @@ static int init_file(struct file *f, int flags, const struct cred *cred)
f->f_mode = OPEN_FMODE(flags);
/* f->f_version: 0 */
+ /*
+ * We're SLAB_TYPESAFE_BY_RCU so initialize f_count last. While
+ * fget-rcu pattern users need to be able to handle spurious
+ * refcount bumps, we should reinitialize the reused file first.
+ */
+ atomic_long_set(&f->f_count, 1);
return 0;
}
@@ -471,7 +481,8 @@ EXPORT_SYMBOL(__fput_sync);
void __init files_init(void)
{
filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
- SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL);
+ SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN |
+ SLAB_PANIC | SLAB_ACCOUNT, NULL);
percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
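The new cache flag and the init-ordering comment in init_file() are two halves of one contract: with SLAB_TYPESAFE_BY_RCU a freed object may be reallocated with no grace period in between, so lookups must revalidate (as in __get_file_rcu() above) and allocation must publish the refcount last, because a nonzero refcount is what lets a racing tryget succeed. Condensed, with illustrative names:

static struct kmem_cache *obj_cachep;

static void __init obj_cache_init(void)
{
	obj_cachep = kmem_cache_create("obj", sizeof(struct obj), 0,
				       SLAB_TYPESAFE_BY_RCU, NULL);
}

static void obj_init(struct obj *o)
{
	spin_lock_init(&o->lock);	/* every other field first */
	/* only now may rcu-side lookups treat the object as live */
	atomic_long_set(&o->refcnt, 1);
}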
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index ac5d43b164b5..20600e9ea202 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -109,11 +109,9 @@ static inline void dip2vip_cpy(struct vxfs_sb_info *sbi,
set_nlink(inode, vip->vii_nlink);
inode->i_size = vip->vii_size;
- inode->i_atime.tv_sec = vip->vii_atime;
+ inode_set_atime(inode, vip->vii_atime, 0);
inode_set_ctime(inode, vip->vii_ctime, 0);
- inode->i_mtime.tv_sec = vip->vii_mtime;
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
+ inode_set_mtime(inode, vip->vii_mtime, 0);
inode->i_blocks = vip->vii_blocks;
inode->i_generation = vip->vii_gen;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c1af01b2c42d..1767493dffda 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -613,6 +613,24 @@ out_free:
kfree(isw);
}
+static bool isw_prepare_wbs_switch(struct inode_switch_wbs_context *isw,
+ struct list_head *list, int *nr)
+{
+ struct inode *inode;
+
+ list_for_each_entry(inode, list, i_io_list) {
+ if (!inode_prepare_wbs_switch(inode, isw->new_wb))
+ continue;
+
+ isw->inodes[*nr] = inode;
+ (*nr)++;
+
+ if (*nr >= WB_MAX_INODES_PER_ISW - 1)
+ return true;
+ }
+ return false;
+}
+
/**
* cleanup_offline_cgwb - detach associated inodes
* @wb: target wb
@@ -625,7 +643,6 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
{
struct cgroup_subsys_state *memcg_css;
struct inode_switch_wbs_context *isw;
- struct inode *inode;
int nr;
bool restart = false;
@@ -647,17 +664,17 @@ bool cleanup_offline_cgwb(struct bdi_writeback *wb)
nr = 0;
spin_lock(&wb->list_lock);
- list_for_each_entry(inode, &wb->b_attached, i_io_list) {
- if (!inode_prepare_wbs_switch(inode, isw->new_wb))
- continue;
-
- isw->inodes[nr++] = inode;
-
- if (nr >= WB_MAX_INODES_PER_ISW - 1) {
- restart = true;
- break;
- }
- }
+ /*
+ * In addition to the inodes that have completed writeback, also switch
+ * cgwbs for inodes with only dirty timestamps. Otherwise, those
+ * inodes won't be written back for a long time when lazytime is
+ * enabled, and will thus pin the dying cgwbs. It won't break the
+ * bandwidth restrictions, as writeback of inode metadata is not
+ * accounted for.
+ */
+ restart = isw_prepare_wbs_switch(isw, &wb->b_attached, &nr);
+ if (!restart)
+ restart = isw_prepare_wbs_switch(isw, &wb->b_dirty_time, &nr);
spin_unlock(&wb->list_lock);
/* no attached inodes? bail out */
diff --git a/fs/fsopen.c b/fs/fsopen.c
index ce03f6521c88..6593ae518115 100644
--- a/fs/fsopen.c
+++ b/fs/fsopen.c
@@ -465,6 +465,7 @@ SYSCALL_DEFINE5(fsconfig,
param.file = fget(aux);
if (!param.file)
goto out_key;
+ param.dirfd = aux;
break;
default:
break;
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index ab62e4624256..284a35006462 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -235,7 +235,7 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
inode->i_mode = mode;
inode->i_uid = fc->user_id;
inode->i_gid = fc->group_id;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
/* setting ->i_op to NULL is not allowed */
if (iop)
inode->i_op = iop;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index d707e6987da9..d19cbf34c634 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1812,12 +1812,12 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
memset(&outarg, 0, sizeof(outarg));
inarg.valid = FATTR_MTIME;
- inarg.mtime = inode->i_mtime.tv_sec;
- inarg.mtimensec = inode->i_mtime.tv_nsec;
+ inarg.mtime = inode_get_mtime_sec(inode);
+ inarg.mtimensec = inode_get_mtime_nsec(inode);
if (fm->fc->minor >= 23) {
inarg.valid |= FATTR_CTIME;
- inarg.ctime = inode_get_ctime(inode).tv_sec;
- inarg.ctimensec = inode_get_ctime(inode).tv_nsec;
+ inarg.ctime = inode_get_ctime_sec(inode);
+ inarg.ctimensec = inode_get_ctime_nsec(inode);
}
if (ff) {
inarg.valid |= FATTR_FH;
@@ -1956,7 +1956,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
/* the kernel maintains i_mtime locally */
if (trust_local_cmtime) {
if (attr->ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (attr->ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
/* FIXME: clear I_DIRTY_SYNC? */
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index bf0b85d0b95c..6e6e721f421b 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1284,7 +1284,7 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value,
size_t size);
ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size);
int fuse_removexattr(struct inode *inode, const char *name);
-extern const struct xattr_handler *fuse_xattr_handlers[];
+extern const struct xattr_handler * const fuse_xattr_handlers[];
struct posix_acl;
struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 2e4eb7cf26fb..caa8121ad99c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -188,12 +188,10 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
attr->mtimensec = min_t(u32, attr->mtimensec, NSEC_PER_SEC - 1);
attr->ctimensec = min_t(u32, attr->ctimensec, NSEC_PER_SEC - 1);
- inode->i_atime.tv_sec = attr->atime;
- inode->i_atime.tv_nsec = attr->atimensec;
+ inode_set_atime(inode, attr->atime, attr->atimensec);
/* mtime from server may be stale due to local buffered write */
if (!(cache_mask & STATX_MTIME)) {
- inode->i_mtime.tv_sec = attr->mtime;
- inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode_set_mtime(inode, attr->mtime, attr->mtimensec);
}
if (!(cache_mask & STATX_CTIME)) {
inode_set_ctime(inode, attr->ctime, attr->ctimensec);
@@ -276,12 +274,12 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
attr->size = i_size_read(inode);
if (cache_mask & STATX_MTIME) {
- attr->mtime = inode->i_mtime.tv_sec;
- attr->mtimensec = inode->i_mtime.tv_nsec;
+ attr->mtime = inode_get_mtime_sec(inode);
+ attr->mtimensec = inode_get_mtime_nsec(inode);
}
if (cache_mask & STATX_CTIME) {
- attr->ctime = inode_get_ctime(inode).tv_sec;
- attr->ctimensec = inode_get_ctime(inode).tv_nsec;
+ attr->ctime = inode_get_ctime_sec(inode);
+ attr->ctimensec = inode_get_ctime_nsec(inode);
}
if ((attr_version != 0 && fi->attr_version > attr_version) ||
@@ -290,7 +288,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
- old_mtime = inode->i_mtime;
+ old_mtime = inode_get_mtime(inode);
fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);
oldsize = inode->i_size;
@@ -337,8 +335,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr,
{
inode->i_mode = attr->mode & S_IFMT;
inode->i_size = attr->size;
- inode->i_mtime.tv_sec = attr->mtime;
- inode->i_mtime.tv_nsec = attr->mtimensec;
+ inode_set_mtime(inode, attr->mtime, attr->mtimensec);
inode_set_ctime(inode, attr->ctime, attr->ctimensec);
if (S_ISREG(inode->i_mode)) {
fuse_init_common(inode);
@@ -1423,17 +1420,19 @@ EXPORT_SYMBOL_GPL(fuse_dev_free);
static void fuse_fill_attr_from_inode(struct fuse_attr *attr,
const struct fuse_inode *fi)
{
+ struct timespec64 atime = inode_get_atime(&fi->inode);
+ struct timespec64 mtime = inode_get_mtime(&fi->inode);
struct timespec64 ctime = inode_get_ctime(&fi->inode);
*attr = (struct fuse_attr){
.ino = fi->inode.i_ino,
.size = fi->inode.i_size,
.blocks = fi->inode.i_blocks,
- .atime = fi->inode.i_atime.tv_sec,
- .mtime = fi->inode.i_mtime.tv_sec,
+ .atime = atime.tv_sec,
+ .mtime = mtime.tv_sec,
.ctime = ctime.tv_sec,
- .atimensec = fi->inode.i_atime.tv_nsec,
- .mtimensec = fi->inode.i_mtime.tv_nsec,
+ .atimensec = atime.tv_nsec,
+ .mtimensec = mtime.tv_nsec,
.ctimensec = ctime.tv_nsec,
.mode = fi->inode.i_mode,
.nlink = fi->inode.i_nlink,
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index 9e6d587b3e67..c66a54d6c7d3 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -476,7 +476,7 @@ retry_locked:
if (!fi->rdc.cached) {
/* Starting cache? Set cache mtime. */
if (!ctx->pos && !fi->rdc.size) {
- fi->rdc.mtime = inode->i_mtime;
+ fi->rdc.mtime = inode_get_mtime(inode);
fi->rdc.iversion = inode_query_iversion(inode);
}
spin_unlock(&fi->rdc.lock);
@@ -488,8 +488,10 @@ retry_locked:
* changed, and reset the cache if so.
*/
if (!ctx->pos) {
+ struct timespec64 mtime = inode_get_mtime(inode);
+
if (inode_peek_iversion(inode) != fi->rdc.iversion ||
- !timespec64_equal(&fi->rdc.mtime, &inode->i_mtime)) {
+ !timespec64_equal(&fi->rdc.mtime, &mtime)) {
fuse_rdc_reset(inode);
goto retry_locked;
}
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 49c01559580f..5b423fdbb13f 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -209,7 +209,7 @@ static const struct xattr_handler fuse_xattr_handler = {
.set = fuse_xattr_set,
};
-const struct xattr_handler *fuse_xattr_handlers[] = {
+const struct xattr_handler * const fuse_xattr_handlers[] = {
&fuse_xattr_handler,
NULL
};
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ef7017fb6951..011cd992e0e6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1386,7 +1386,7 @@ static int trunc_start(struct inode *inode, u64 newsize)
ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
i_size_write(inode, newsize);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_dinode_out(ip, dibh->b_data);
if (journaled)
@@ -1583,7 +1583,7 @@ out_unlock:
/* Every transaction boundary, we rewrite the dinode
to keep its di_blocks current in case of failure. */
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -1949,7 +1949,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
gfs2_statfs_change(sdp, 0, +btotal, 0);
gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
ip->i_inode.i_gid);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
up_write(&ip->i_rw_mutex);
@@ -1992,7 +1992,7 @@ static int trunc_end(struct gfs2_inode *ip)
gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
gfs2_ordered_del_inode(ip);
}
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
gfs2_trans_add_meta(ip->i_gl, dibh);
@@ -2093,7 +2093,7 @@ static int do_grow(struct inode *inode, u64 size)
goto do_end_trans;
truncate_setsize(inode, size);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 1a2afa88f8be..61ddd03ea111 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -130,7 +130,7 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
if (ip->i_inode.i_size < offset + size)
i_size_write(&ip->i_inode, offset + size);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
@@ -227,7 +227,7 @@ out:
if (ip->i_inode.i_size < offset + copied)
i_size_write(&ip->i_inode, offset + copied);
- ip->i_inode.i_mtime = inode_set_ctime_current(&ip->i_inode);
+ inode_set_mtime_to_ts(&ip->i_inode, inode_set_ctime_current(&ip->i_inode));
gfs2_trans_add_meta(ip->i_gl, dibh);
gfs2_dinode_out(ip, dibh->b_data);
@@ -1825,7 +1825,7 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
da->bh = NULL;
brelse(bh);
ip->i_entries++;
- ip->i_inode.i_mtime = tv;
+ inode_set_mtime_to_ts(&ip->i_inode, tv);
if (S_ISDIR(nip->i_inode.i_mode))
inc_nlink(&ip->i_inode);
mark_inode_dirty(inode);
@@ -1911,7 +1911,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
if (!dip->i_entries)
gfs2_consist_inode(dip);
dip->i_entries--;
- dip->i_inode.i_mtime = tv;
+ inode_set_mtime_to_ts(&dip->i_inode, tv);
if (d_is_dir(dentry))
drop_nlink(&dip->i_inode);
mark_inode_dirty(&dip->i_inode);
@@ -1952,7 +1952,7 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
dent->de_type = cpu_to_be16(new_type);
brelse(bh);
- dip->i_inode.i_mtime = inode_set_ctime_current(&dip->i_inode);
+ inode_set_mtime_to_ts(&dip->i_inode, inode_set_ctime_current(&dip->i_inode));
mark_inode_dirty_sync(&dip->i_inode);
return 0;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4a280be229a6..3772a5d9e85c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -2719,16 +2719,19 @@ static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
for(;; i->fd++) {
struct inode *inode;
- i->file = task_lookup_next_fd_rcu(i->task, &i->fd);
+ i->file = task_lookup_next_fdget_rcu(i->task, &i->fd);
if (!i->file) {
i->fd = 0;
break;
}
+
inode = file_inode(i->file);
- if (inode->i_sb != i->sb)
- continue;
- if (get_file_rcu(i->file))
+ if (inode->i_sb == i->sb)
break;
+
+ rcu_read_unlock();
+ fput(i->file);
+ rcu_read_lock();
}
rcu_read_unlock();
return i->file;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f41ca89d216b..e7d334c277a1 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -403,7 +403,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
const struct gfs2_dinode *str = buf;
- struct timespec64 atime;
+ struct timespec64 atime, iatime;
u16 height, depth;
umode_t mode = be32_to_cpu(str->di_mode);
struct inode *inode = &ip->i_inode;
@@ -433,10 +433,11 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
atime.tv_sec = be64_to_cpu(str->di_atime);
atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
- if (timespec64_compare(&inode->i_atime, &atime) < 0)
- inode->i_atime = atime;
- inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
- inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
+ iatime = inode_get_atime(inode);
+ if (timespec64_compare(&iatime, &atime) < 0)
+ inode_set_atime_to_ts(inode, atime);
+ inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
+ be32_to_cpu(str->di_mtime_nsec));
inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
be32_to_cpu(str->di_ctime_nsec));
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 0eac04507904..7fe77bc771e5 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -185,8 +185,9 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
- inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
- inode->i_atime.tv_nsec = 0;
+ inode_set_atime(inode,
+ 1LL << (8 * sizeof(inode_get_atime_sec(inode)) - 1),
+ 0);
glock_set_object(ip->i_gl, ip);
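The shift expression is simply the sign bit of the seconds field: with a 64-bit tv_sec it evaluates to 1 << 63, the most negative time64_t, so whatever atime gfs2_dinode_in() later reads from disk compares newer and replaces it. In isolation:

/* lowest representable timestamp: only the sign bit set (== S64_MIN) */
time64_t lowest = 1LL << (8 * sizeof(time64_t) - 1);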
@@ -696,7 +697,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
inode->i_rdev = dev;
inode->i_size = size;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
munge_mode_uid_gid(dip, inode);
check_and_update_goal(dip);
ip->i_goal = dip->i_goal;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 171b2713d2e5..d9854aece15b 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -886,7 +886,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size)
i_size_write(inode, size);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
set_bit(QDF_REFRESH, &qd->qd_flags);
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 02d93da21b2b..52a878fa7139 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -410,9 +410,9 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_nlink = cpu_to_be32(inode->i_nlink);
str->di_size = cpu_to_be64(i_size_read(inode));
str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
- str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
- str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
- str->di_ctime = cpu_to_be64(inode_get_ctime(inode).tv_sec);
+ str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
+ str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
+ str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));
str->di_goal_meta = cpu_to_be64(ip->i_goal);
str->di_goal_data = cpu_to_be64(ip->i_goal);
@@ -427,9 +427,9 @@ void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
str->di_entries = cpu_to_be32(ip->i_entries);
str->di_eattr = cpu_to_be64(ip->i_eattr);
- str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
- str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
- str->di_ctime_nsec = cpu_to_be32(inode_get_ctime(inode).tv_nsec);
+ str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
+ str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
+ str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
}
/**
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index ab9c83106932..b4ddf6244586 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -60,8 +60,8 @@ extern const struct export_operations gfs2_export_ops;
extern const struct super_operations gfs2_super_ops;
extern const struct dentry_operations gfs2_dops;
-extern const struct xattr_handler *gfs2_xattr_handlers_max[];
-extern const struct xattr_handler **gfs2_xattr_handlers_min;
+extern const struct xattr_handler * const gfs2_xattr_handlers_max[];
+extern const struct xattr_handler * const *gfs2_xattr_handlers_min;
#endif /* __SUPER_DOT_H__ */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 4fea70c0fe3d..79d5c5559512 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1494,7 +1494,7 @@ static const struct xattr_handler gfs2_xattr_trusted_handler = {
.set = gfs2_xattr_set,
};
-const struct xattr_handler *gfs2_xattr_handlers_max[] = {
+const struct xattr_handler * const gfs2_xattr_handlers_max[] = {
/* GFS2_FS_FORMAT_MAX */
&gfs2_xattr_trusted_handler,
@@ -1504,4 +1504,4 @@ const struct xattr_handler *gfs2_xattr_handlers_max[] = {
NULL,
};
-const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
+const struct xattr_handler * const *gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 6341bb248247..f8395cdd1adf 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -146,7 +146,7 @@ static const struct xattr_handler hfs_type_handler = {
.set = hfs_xattr_set,
};
-const struct xattr_handler *hfs_xattr_handlers[] = {
+const struct xattr_handler * const hfs_xattr_handlers[] = {
&hfs_creator_handler,
&hfs_type_handler,
NULL
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 632c226a3972..d63880e7d9d6 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -133,7 +133,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct i
goto err1;
dir->i_size++;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
hfs_find_exit(&fd);
return 0;
@@ -269,7 +269,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
}
dir->i_size--;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
res = 0;
out:
@@ -337,7 +337,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
dst_dir->i_size++;
- dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
+ inode_set_mtime_to_ts(dst_dir, inode_set_ctime_current(dst_dir));
mark_inode_dirty(dst_dir);
/* finally remove the old entry */
@@ -349,7 +349,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
if (err)
goto out;
src_dir->i_size--;
- src_dir->i_mtime = inode_set_ctime_current(src_dir);
+ inode_set_mtime_to_ts(src_dir, inode_set_ctime_current(src_dir));
mark_inode_dirty(src_dir);
type = entry.type;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 49d02524e667..b5a6ad5df357 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -215,7 +215,7 @@ extern void hfs_evict_inode(struct inode *);
extern void hfs_delete_inode(struct inode *);
/* attr.c */
-extern const struct xattr_handler *hfs_xattr_handlers[];
+extern const struct xattr_handler * const hfs_xattr_handlers[];
/* mdb.c */
extern int hfs_mdb_get(struct super_block *);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index ee349b72cfb3..a7bc4690a780 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -200,7 +200,7 @@ struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
@@ -355,8 +355,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_mode |= S_IWUGO;
inode->i_mode &= ~hsb->s_file_umask;
inode->i_mode |= S_IFREG;
- inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode,
- hfs_m_to_utime(rec->file.MdDat));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->file.MdDat))));
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
@@ -366,8 +366,8 @@ static int hfs_read_inode(struct inode *inode, void *data)
inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
HFS_I(inode)->fs_blocks = 0;
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
- inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode,
- hfs_m_to_utime(rec->dir.MdDat));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->dir.MdDat))));
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
break;
@@ -474,7 +474,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
}
- rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
+ rec.dir.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
rec.dir.Val = cpu_to_be16(inode->i_size - 2);
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
@@ -502,7 +502,7 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
else
rec.file.Flags |= HFS_FIL_LOCK;
hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
- rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);
+ rec.file.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
@@ -654,7 +654,7 @@ int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
setattr_copy(&nop_mnt_idmap, inode, attr);
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index dc27d418fbcd..76fa02e3835b 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -28,11 +28,13 @@ static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
/* fix up inode on a timezone change */
diff = sys_tz.tz_minuteswest * 60 - HFS_I(inode)->tz_secondswest;
if (diff) {
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts = inode_get_ctime(inode);
- inode_set_ctime(inode, ctime.tv_sec + diff, ctime.tv_nsec);
- inode->i_atime.tv_sec += diff;
- inode->i_mtime.tv_sec += diff;
+ inode_set_ctime(inode, ts.tv_sec + diff, ts.tv_nsec);
+ ts = inode_get_atime(inode);
+ inode_set_atime(inode, ts.tv_sec + diff, ts.tv_nsec);
+ ts = inode_get_mtime(inode);
+ inode_set_mtime(inode, ts.tv_sec + diff, ts.tv_nsec);
HFS_I(inode)->tz_secondswest += diff;
}
return 1;
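The timezone fixup now runs the same get/add/set dance once per timestamp. Factored out, the pattern would look like this (the helper name is illustrative, not part of the series):

static void inode_shift_times(struct inode *inode, s64 diff)
{
	struct timespec64 ts;

	ts = inode_get_ctime(inode);
	inode_set_ctime(inode, ts.tv_sec + diff, ts.tv_nsec);
	ts = inode_get_atime(inode);
	inode_set_atime(inode, ts.tv_sec + diff, ts.tv_nsec);
	ts = inode_get_mtime(inode);
	inode_set_mtime(inode, ts.tv_sec + diff, ts.tv_nsec);
}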
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index e71ae2537eaa..1995bafee839 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -312,7 +312,7 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
dir->i_size++;
if (S_ISDIR(inode->i_mode))
hfsplus_subfolders_inc(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
hfs_find_exit(&fd);
@@ -417,7 +417,7 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
if (type == HFSPLUS_FILE || type == HFSPLUS_FOLDER) {
@@ -494,7 +494,7 @@ int hfsplus_rename_cat(u32 cnid,
dst_dir->i_size++;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_inc(dst_dir);
- dst_dir->i_mtime = inode_set_ctime_current(dst_dir);
+ inode_set_mtime_to_ts(dst_dir, inode_set_ctime_current(dst_dir));
/* finally remove the old entry */
err = hfsplus_cat_build_key(sb, src_fd.search_key,
@@ -511,7 +511,7 @@ int hfsplus_rename_cat(u32 cnid,
src_dir->i_size--;
if (type == HFSPLUS_FOLDER)
hfsplus_subfolders_dec(src_dir);
- src_dir->i_mtime = inode_set_ctime_current(src_dir);
+ inode_set_mtime_to_ts(src_dir, inode_set_ctime_current(src_dir));
/* remove old thread entry */
hfsplus_cat_build_key_with_cnid(sb, src_fd.search_key, cnid);
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index c65c8c4b03dd..702a0663b1d8 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -267,7 +267,7 @@ static int hfsplus_setattr(struct mnt_idmap *idmap,
}
truncate_setsize(inode, attr->ia_size);
hfsplus_file_truncate(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
setattr_copy(&nop_mnt_idmap, inode, attr);
@@ -392,7 +392,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
inode->i_ino = sbi->next_cnid++;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
set_nlink(inode, 1);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
hip = HFSPLUS_I(inode);
INIT_LIST_HEAD(&hip->open_dir_list);
@@ -521,8 +521,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
hfsplus_get_perms(inode, &folder->permissions, 1);
set_nlink(inode, 1);
inode->i_size = 2 + be32_to_cpu(folder->valence);
- inode->i_atime = hfsp_mt2ut(folder->access_date);
- inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
+ inode_set_atime_to_ts(inode, hfsp_mt2ut(folder->access_date));
+ inode_set_mtime_to_ts(inode,
+ hfsp_mt2ut(folder->content_mod_date));
inode_set_ctime_to_ts(inode,
hfsp_mt2ut(folder->attribute_mod_date));
HFSPLUS_I(inode)->create_date = folder->create_date;
@@ -563,8 +564,9 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
init_special_inode(inode, inode->i_mode,
be32_to_cpu(file->permissions.dev));
}
- inode->i_atime = hfsp_mt2ut(file->access_date);
- inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
+ inode_set_atime_to_ts(inode, hfsp_mt2ut(file->access_date));
+ inode_set_mtime_to_ts(inode,
+ hfsp_mt2ut(file->content_mod_date));
inode_set_ctime_to_ts(inode,
hfsp_mt2ut(file->attribute_mod_date));
HFSPLUS_I(inode)->create_date = file->create_date;
@@ -609,8 +611,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
sizeof(struct hfsplus_cat_folder));
/* simple node checks? */
hfsplus_cat_set_perms(inode, &folder->permissions);
- folder->access_date = hfsp_ut2mt(inode->i_atime);
- folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
+ folder->access_date = hfsp_ut2mt(inode_get_atime(inode));
+ folder->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
folder->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
folder->valence = cpu_to_be32(inode->i_size - 2);
if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
@@ -644,8 +646,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
else
file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
- file->access_date = hfsp_ut2mt(inode->i_atime);
- file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
+ file->access_date = hfsp_ut2mt(inode_get_atime(inode));
+ file->content_mod_date = hfsp_ut2mt(inode_get_mtime(inode));
file->attribute_mod_date = hfsp_ut2mt(inode_get_ctime(inode));
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_file));
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 58021e73c00b..9c9ff6b8c6f7 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -13,7 +13,7 @@
static int hfsplus_removexattr(struct inode *inode, const char *name);
-const struct xattr_handler *hfsplus_xattr_handlers[] = {
+const struct xattr_handler * const hfsplus_xattr_handlers[] = {
&hfsplus_xattr_osx_handler,
&hfsplus_xattr_user_handler,
&hfsplus_xattr_trusted_handler,
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index d14e362b3eba..15cc55e41410 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -17,7 +17,7 @@ extern const struct xattr_handler hfsplus_xattr_user_handler;
extern const struct xattr_handler hfsplus_xattr_trusted_handler;
extern const struct xattr_handler hfsplus_xattr_security_handler;
-extern const struct xattr_handler *hfsplus_xattr_handlers[];
+extern const struct xattr_handler * const hfsplus_xattr_handlers[];
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index dc5a5cea5fae..ea87f24c6c3f 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -513,10 +513,14 @@ static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st)
set_nlink(ino, st->nlink);
i_uid_write(ino, st->uid);
i_gid_write(ino, st->gid);
- ino->i_atime =
- (struct timespec64){ st->atime.tv_sec, st->atime.tv_nsec };
- ino->i_mtime =
- (struct timespec64){ st->mtime.tv_sec, st->mtime.tv_nsec };
+ inode_set_atime_to_ts(ino, (struct timespec64){
+ st->atime.tv_sec,
+ st->atime.tv_nsec,
+ });
+ inode_set_mtime_to_ts(ino, (struct timespec64){
+ st->mtime.tv_sec,
+ st->mtime.tv_nsec,
+ });
inode_set_ctime(ino, st->ctime.tv_sec, st->ctime.tv_nsec);
ino->i_size = st->size;
ino->i_blocks = st->blocks;
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index f36566d61215..49dd585c2b17 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -277,14 +277,16 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned in
* inode.
*/
- if (!inode_get_ctime(result).tv_sec) {
+ if (!inode_get_ctime_sec(result)) {
time64_t csec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date));
inode_set_ctime(result, csec ? csec : 1, 0);
- result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
- result->i_mtime.tv_nsec = 0;
- result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
- result->i_atime.tv_nsec = 0;
+ inode_set_mtime(result,
+ local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)),
+ 0);
+ inode_set_atime(result,
+ local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)),
+ 0);
hpfs_result->i_ea_size = le32_to_cpu(de->ea_size);
if (!hpfs_result->i_ea_mode && de->read_only)
result->i_mode &= ~0222;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 479166378bae..a59e8fa630db 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -37,8 +37,8 @@ void hpfs_init_inode(struct inode *i)
hpfs_inode->i_dirty = 0;
inode_set_ctime(i, 0, 0);
- i->i_mtime.tv_sec = i->i_mtime.tv_nsec = 0;
- i->i_atime.tv_sec = i->i_atime.tv_nsec = 0;
+ inode_set_mtime(i, 0, 0);
+ inode_set_atime(i, 0, 0);
}
void hpfs_read_inode(struct inode *i)
@@ -230,9 +230,9 @@ void hpfs_write_inode_nolock(struct inode *i)
}
hpfs_write_inode_ea(i, fnode);
if (de) {
- de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
- de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec));
+ de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_mtime_sec(i)));
+ de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_atime_sec(i)));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime_sec(i)));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
hpfs_mark_4buffers_dirty(&qbh);
@@ -240,9 +240,9 @@ void hpfs_write_inode_nolock(struct inode *i)
}
if (S_ISDIR(i->i_mode)) {
if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
- de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
- de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
- de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime(i).tv_sec));
+ de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_mtime_sec(i)));
+ de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_atime_sec(i)));
+ de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, inode_get_ctime_sec(i)));
de->read_only = !(i->i_mode & 0222);
de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
de->file_size = cpu_to_le32(0);
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index f4eb8d6f5989..9184b4584b01 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -12,10 +12,10 @@
static void hpfs_update_directory_times(struct inode *dir)
{
time64_t t = local_to_gmt(dir->i_sb, local_get_seconds(dir->i_sb));
- if (t == dir->i_mtime.tv_sec &&
- t == inode_get_ctime(dir).tv_sec)
+ if (t == inode_get_mtime_sec(dir) &&
+ t == inode_get_ctime_sec(dir))
return;
- dir->i_mtime = inode_set_ctime(dir, t, 0);
+ inode_set_mtime_to_ts(dir, inode_set_ctime(dir, t, 0));
hpfs_write_inode_nolock(dir);
}
@@ -58,8 +58,8 @@ static int hpfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
hpfs_i(result)->i_dno = dno;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_mode |= S_IFDIR;
result->i_op = &hpfs_dir_iops;
@@ -164,8 +164,8 @@ static int hpfs_create(struct mnt_idmap *idmap, struct inode *dir,
result->i_fop = &hpfs_file_ops;
set_nlink(result, 1);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
if (dee.read_only)
result->i_mode &= ~0222;
@@ -245,8 +245,8 @@ static int hpfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
hpfs_init_inode(result);
result->i_ino = fno;
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_uid = current_fsuid();
result->i_gid = current_fsgid();
@@ -319,8 +319,8 @@ static int hpfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
result->i_ino = fno;
hpfs_init_inode(result);
hpfs_i(result)->i_parent_dir = dir->i_ino;
- result->i_mtime = result->i_atime =
- inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0);
+ inode_set_mtime_to_ts(result,
+ inode_set_atime_to_ts(result, inode_set_ctime(result, local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date)), 0)));
hpfs_i(result)->i_ea_size = 0;
result->i_mode = S_IFLNK | 0777;
result->i_uid = current_fsuid();
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 758a51564124..6b0ba3c1efba 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -725,10 +725,12 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
if (!de)
hpfs_error(s, "unable to find root dir");
else {
- root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
- root->i_atime.tv_nsec = 0;
- root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
- root->i_mtime.tv_nsec = 0;
+ inode_set_atime(root,
+ local_to_gmt(s, le32_to_cpu(de->read_date)),
+ 0);
+ inode_set_mtime(root,
+ local_to_gmt(s, le32_to_cpu(de->write_date)),
+ 0);
inode_set_ctime(root,
local_to_gmt(s, le32_to_cpu(de->creation_date)),
0);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316c4cebd3f3..da217eaba102 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -980,7 +980,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
inode->i_mode = S_IFDIR | ctx->mode;
inode->i_uid = ctx->uid;
inode->i_gid = ctx->gid;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
@@ -1024,7 +1024,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
&hugetlbfs_i_mmap_rwsem_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->private_data = resv_map;
info->seals = F_SEAL_SEAL;
switch (mode & S_IFMT) {
@@ -1067,7 +1067,7 @@ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (!inode)
return -ENOSPC;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
d_instantiate(dentry, inode);
dget(dentry);/* Extra count - pin the dentry in core */
return 0;
@@ -1099,7 +1099,7 @@ static int hugetlbfs_tmpfile(struct mnt_idmap *idmap,
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode | S_IFREG, 0);
if (!inode)
return -ENOSPC;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
d_tmpfile(file, inode);
return finish_open_simple(file, 0);
}
@@ -1121,7 +1121,7 @@ static int hugetlbfs_symlink(struct mnt_idmap *idmap,
} else
iput(inode);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
return error;
}
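Directory updates throughout this series rely on inode_set_ctime_current() returning the timestamp it stored, which lets the mtime update chain onto it so ctime and mtime stay identical. A sketch of the idiom with a made-up touch_dir() helper:

static void touch_dir(struct inode *dir)
{
	/* ctime is set to "now" and the same value becomes the mtime */
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
}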
diff --git a/fs/init.c b/fs/init.c
index 9684406a8416..e9387b6c4f30 100644
--- a/fs/init.c
+++ b/fs/init.c
@@ -153,8 +153,7 @@ int __init init_mknod(const char *filename, umode_t mode, unsigned int dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
+ mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mknod(&path, dentry, mode, dev);
if (!error)
error = vfs_mknod(mnt_idmap(path.mnt), path.dentry->d_inode,
@@ -229,8 +228,7 @@ int __init init_mkdir(const char *pathname, umode_t mode)
dentry = kern_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (!IS_POSIXACL(path.dentry->d_inode))
- mode &= ~current_umask();
+ mode = mode_strip_umask(d_inode(path.dentry), mode);
error = security_path_mkdir(&path, dentry, mode);
if (!error)
error = vfs_mkdir(mnt_idmap(path.mnt), path.dentry->d_inode,
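The open-coded IS_POSIXACL()/umask dance above is replaced by mode_strip_umask(). Its body is visible in the fs/namei.c hunk later in this section, where the formerly static copy is removed in favour of a shared helper; for reference it amounts to:

static inline umode_t mode_strip_umask(const struct inode *dir, umode_t mode)
{
	if (!IS_POSIXACL(dir))
		mode &= ~current_umask();
	return mode;
}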
diff --git a/fs/inode.c b/fs/inode.c
index 84bc3c76e5cc..4f8984b97df0 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1837,27 +1837,29 @@ EXPORT_SYMBOL(bmap);
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
struct timespec64 now)
{
- struct timespec64 ctime;
+ struct timespec64 atime, mtime, ctime;
if (!(mnt->mnt_flags & MNT_RELATIME))
return 1;
/*
* Is mtime younger than or equal to atime? If yes, update atime:
*/
- if (timespec64_compare(&inode->i_mtime, &inode->i_atime) >= 0)
+ atime = inode_get_atime(inode);
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&mtime, &atime) >= 0)
return 1;
/*
* Is ctime younger than or equal to atime? If yes, update atime:
*/
ctime = inode_get_ctime(inode);
- if (timespec64_compare(&ctime, &inode->i_atime) >= 0)
+ if (timespec64_compare(&ctime, &atime) >= 0)
return 1;
/*
* Is the previous atime value older than a day? If yes,
* update atime:
*/
- if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
+ if ((long)(now.tv_sec - atime.tv_sec) >= 24*60*60)
return 1;
/*
* Good, we can skip the atime update:
@@ -1888,12 +1890,13 @@ int inode_update_timestamps(struct inode *inode, int flags)
if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
now = inode_set_ctime_current(inode);
if (!timespec64_equal(&now, &ctime))
updated |= S_CTIME;
- if (!timespec64_equal(&now, &inode->i_mtime)) {
- inode->i_mtime = now;
+ if (!timespec64_equal(&now, &mtime)) {
+ inode_set_mtime_to_ts(inode, now);
updated |= S_MTIME;
}
if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
@@ -1903,8 +1906,10 @@ int inode_update_timestamps(struct inode *inode, int flags)
}
if (flags & S_ATIME) {
- if (!timespec64_equal(&now, &inode->i_atime)) {
- inode->i_atime = now;
+ struct timespec64 atime = inode_get_atime(inode);
+
+ if (!timespec64_equal(&now, &atime)) {
+ inode_set_atime_to_ts(inode, now);
updated |= S_ATIME;
}
}
@@ -1963,7 +1968,7 @@ EXPORT_SYMBOL(inode_update_time);
bool atime_needs_update(const struct path *path, struct inode *inode)
{
struct vfsmount *mnt = path->mnt;
- struct timespec64 now;
+ struct timespec64 now, atime;
if (inode->i_flags & S_NOATIME)
return false;
@@ -1989,7 +1994,8 @@ bool atime_needs_update(const struct path *path, struct inode *inode)
if (!relatime_need_update(mnt, inode, now))
return false;
- if (timespec64_equal(&inode->i_atime, &now))
+ atime = inode_get_atime(inode);
+ if (timespec64_equal(&atime, &now))
return false;
return true;
@@ -2006,7 +2012,7 @@ void touch_atime(const struct path *path)
if (!sb_start_write_trylock(inode->i_sb))
return;
- if (__mnt_want_write(mnt) != 0)
+ if (mnt_get_write_access(mnt) != 0)
goto skip_update;
/*
* File systems can error out when updating inodes if they need to
@@ -2018,7 +2024,7 @@ void touch_atime(const struct path *path)
* of the fs read only, e.g. subvolumes in Btrfs.
*/
inode_update_time(inode, S_ATIME);
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
skip_update:
sb_end_write(inode->i_sb);
}
@@ -2106,17 +2112,18 @@ static int inode_needs_update_time(struct inode *inode)
{
int sync_it = 0;
struct timespec64 now = current_time(inode);
- struct timespec64 ctime;
+ struct timespec64 ts;
/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;
- if (!timespec64_equal(&inode->i_mtime, &now))
+ ts = inode_get_mtime(inode);
+ if (!timespec64_equal(&ts, &now))
sync_it = S_MTIME;
- ctime = inode_get_ctime(inode);
- if (!timespec64_equal(&ctime, &now))
+ ts = inode_get_ctime(inode);
+ if (!timespec64_equal(&ts, &now))
sync_it |= S_CTIME;
if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode))
@@ -2131,9 +2138,9 @@ static int __file_update_time(struct file *file, int sync_mode)
struct inode *inode = file_inode(file);
/* try to update time settings */
- if (!__mnt_want_write_file(file)) {
+ if (!mnt_get_write_access_file(file)) {
ret = inode_update_time(inode, sync_mode);
- __mnt_drop_write_file(file);
+ mnt_put_write_access_file(file);
}
return ret;
diff --git a/fs/internal.h b/fs/internal.h
index d64ae03998cc..58e43341aebf 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -73,8 +73,8 @@ extern int sb_prepare_remount_readonly(struct super_block *);
extern void __init mnt_init(void);
-extern int __mnt_want_write_file(struct file *);
-extern void __mnt_drop_write_file(struct file *);
+int mnt_get_write_access_file(struct file *file);
+void mnt_put_write_access_file(struct file *file);
extern void dissolve_on_fput(struct vfsmount *);
extern bool may_mount(void);
@@ -94,14 +94,22 @@ extern void chroot_fs_refs(const struct path *, const struct path *);
struct file *alloc_empty_file(int flags, const struct cred *cred);
struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred);
struct file *alloc_empty_backing_file(int flags, const struct cred *cred);
+void release_empty_file(struct file *f);
+
+static inline void file_put_write_access(struct file *file)
+{
+ put_write_access(file->f_inode);
+ mnt_put_write_access(file->f_path.mnt);
+ if (unlikely(file->f_mode & FMODE_BACKING))
+ mnt_put_write_access(backing_file_user_path(file)->mnt);
+}
static inline void put_file_access(struct file *file)
{
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
i_readcount_dec(file->f_inode);
} else if (file->f_mode & FMODE_WRITER) {
- put_write_access(file->f_inode);
- __mnt_drop_write(file->f_path.mnt);
+ file_put_write_access(file);
}
}
@@ -130,9 +138,9 @@ static inline void sb_start_ro_state_change(struct super_block *sb)
* mnt_is_readonly() making sure if mnt_is_readonly() sees SB_RDONLY
* cleared, it will see s_readonly_remount set.
* For RW->RO transition, the barrier pairs with the barrier in
- * __mnt_want_write() before the mnt_is_readonly() check. The barrier
- * makes sure if __mnt_want_write() sees MNT_WRITE_HOLD already
- * cleared, it will see s_readonly_remount set.
+ * mnt_get_write_access() before the mnt_is_readonly() check.
+ * The barrier makes sure if mnt_get_write_access() sees MNT_WRITE_HOLD
+ * already cleared, it will see s_readonly_remount set.
*/
smp_wmb();
}
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 5db54ca29a35..2bc0aa23fde3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -881,8 +881,10 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
+ bytes = iov_iter_count(i);
+retry:
offset = pos & (chunk - 1);
- bytes = min(chunk - offset, iov_iter_count(i));
+ bytes = min(chunk - offset, bytes);
status = balance_dirty_pages_ratelimited_flags(mapping,
bdp_flags);
if (unlikely(status))
@@ -933,10 +935,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
* halfway through, might be a race with munmap,
* might be severe memory pressure.
*/
- if (copied)
- bytes = copied;
if (chunk > PAGE_SIZE)
chunk /= 2;
+ if (copied) {
+ bytes = copied;
+ goto retry;
+ }
} else {
pos += status;
written += status;
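The iomap change above makes a short copy retry the same range with the length clamped to what was actually copied, halving the chunk size under pressure, instead of falling through with a stale length. A self-contained sketch of that control flow, with copy_chunk() as a hypothetical stand-in for the begin/copy/end machinery:

static size_t write_loop(loff_t pos, size_t chunk, size_t total)
{
	size_t written = 0;
	size_t bytes = total;

	while (written < total) {
		size_t offset = pos & (chunk - 1);
		size_t copied;

		bytes = min(chunk - offset, bytes);
		copied = copy_chunk(pos, bytes);	/* may copy less */
		if (copied != bytes) {
			if (chunk > PAGE_SIZE)
				chunk /= 2;		/* shrink the window */
			if (copied) {
				bytes = copied;		/* retry what we got */
				continue;
			}
			break;				/* no progress: bail */
		}
		pos += copied;
		written += copied;
		bytes = total - written;
	}
	return written;
}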
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 2ee21286ac8f..3e4d53e26f94 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -1422,8 +1422,8 @@ static int isofs_read_inode(struct inode *inode, int relocated)
inode->i_ino, de->flags[-high_sierra]);
}
#endif
- inode->i_mtime = inode->i_atime =
- inode_set_ctime(inode, iso_date(de->date, high_sierra), 0);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, iso_date(de->date, high_sierra), 0)));
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index 348783a70f57..d6c17ad69dee 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -426,16 +426,14 @@ repeat:
0);
}
if (rr->u.TF.flags & TF_MODIFY) {
- inode->i_mtime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_mtime.tv_nsec = 0;
+ inode_set_mtime(inode,
+ iso_date(rr->u.TF.times[cnt++].time, 0),
+ 0);
}
if (rr->u.TF.flags & TF_ACCESS) {
- inode->i_atime.tv_sec =
- iso_date(rr->u.TF.times[cnt++].time,
- 0);
- inode->i_atime.tv_nsec = 0;
+ inode_set_atime(inode,
+ iso_date(rr->u.TF.times[cnt++].time, 0),
+ 0);
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode_set_ctime(inode,
@@ -531,9 +529,9 @@ repeat:
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
- inode->i_atime = reloc->i_atime;
+ inode_set_atime_to_ts(inode, inode_get_atime(reloc));
inode_set_ctime_to_ts(inode, inode_get_ctime(reloc));
- inode->i_mtime = reloc->i_mtime;
+ inode_set_mtime_to_ts(inode, inode_get_mtime(reloc));
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 091ab0eaabbe..2b2938970da3 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -204,8 +204,8 @@ static int jffs2_create(struct mnt_idmap *idmap, struct inode *dir_i,
if (ret)
goto fail;
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(ri->ctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(ri->ctime))));
jffs2_free_raw_inode(ri);
@@ -238,7 +238,8 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
if (dead_f->inocache)
set_nlink(d_inode(dentry), dead_f->inocache->pino_nlink);
if (!ret)
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
return ret;
}
/***********************************************************************/
@@ -272,7 +273,8 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
set_nlink(d_inode(old_dentry), ++f->inocache->pino_nlink);
mutex_unlock(&f->sem);
d_instantiate(dentry, d_inode(old_dentry));
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
ihold(d_inode(old_dentry));
}
return ret;
@@ -423,8 +425,8 @@ static int jffs2_symlink (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(rd->mctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
jffs2_free_raw_dirent(rd);
@@ -568,8 +570,8 @@ static int jffs2_mkdir (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(rd->mctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
inc_nlink(dir_i);
jffs2_free_raw_dirent(rd);
@@ -610,7 +612,8 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
dentry->d_name.len, f, now);
if (!ret) {
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i, ITIME(now));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(now)));
clear_nlink(d_inode(dentry));
drop_nlink(dir_i);
}
@@ -746,8 +749,8 @@ static int jffs2_mknod (struct mnt_idmap *idmap, struct inode *dir_i,
goto fail;
}
- dir_i->i_mtime = inode_set_ctime_to_ts(dir_i,
- ITIME(je32_to_cpu(rd->mctime)));
+ inode_set_mtime_to_ts(dir_i,
+ inode_set_ctime_to_ts(dir_i, ITIME(je32_to_cpu(rd->mctime))));
jffs2_free_raw_dirent(rd);
@@ -868,16 +871,18 @@ static int jffs2_rename (struct mnt_idmap *idmap,
* caller won't do it on its own since we are returning an error.
*/
d_invalidate(new_dentry);
- new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i,
- ITIME(now));
+ inode_set_mtime_to_ts(new_dir_i,
+ inode_set_ctime_to_ts(new_dir_i, ITIME(now)));
return ret;
}
if (d_is_dir(old_dentry))
drop_nlink(old_dir_i);
- old_dir_i->i_mtime = inode_set_ctime_to_ts(old_dir_i, ITIME(now));
- new_dir_i->i_mtime = inode_set_ctime_to_ts(new_dir_i, ITIME(now));
+ inode_set_mtime_to_ts(old_dir_i,
+ inode_set_ctime_to_ts(old_dir_i, ITIME(now)));
+ inode_set_mtime_to_ts(new_dir_i,
+ inode_set_ctime_to_ts(new_dir_i, ITIME(now)));
return 0;
}
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 11c66793960e..62ea76da7fdf 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -317,8 +317,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping,
inode->i_size = pos + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_mtime = inode_set_ctime_to_ts(inode,
- ITIME(je32_to_cpu(ri->ctime)));
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime))));
}
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 0403efab4089..d175cccb7c55 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -113,8 +113,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
- ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
- ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
+ ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode_get_atime(inode)));
+ ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode_get_mtime(inode)));
ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode)));
ri->offset = cpu_to_je32(0);
@@ -147,9 +147,9 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
- inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+ inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(ri->atime)));
inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime)));
- inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
+ inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(ri->mtime)));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
i_gid_write(inode, je16_to_cpu(ri->gid));
@@ -282,8 +282,8 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
i_uid_write(inode, je16_to_cpu(latest_node.uid));
i_gid_write(inode, je16_to_cpu(latest_node.gid));
inode->i_size = je32_to_cpu(latest_node.isize);
- inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
- inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+ inode_set_atime_to_ts(inode, ITIME(je32_to_cpu(latest_node.atime)));
+ inode_set_mtime_to_ts(inode, ITIME(je32_to_cpu(latest_node.mtime)));
inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime)));
set_nlink(inode, f->inocache->pino_nlink);
@@ -386,8 +386,8 @@ void jffs2_dirty_inode(struct inode *inode, int flags)
iattr.ia_mode = inode->i_mode;
iattr.ia_uid = inode->i_uid;
iattr.ia_gid = inode->i_gid;
- iattr.ia_atime = inode->i_atime;
- iattr.ia_mtime = inode->i_mtime;
+ iattr.ia_atime = inode_get_atime(inode);
+ iattr.ia_mtime = inode_get_mtime(inode);
iattr.ia_ctime = inode_get_ctime(inode);
jffs2_do_setattr(inode, &iattr);
@@ -475,8 +475,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r
inode->i_mode = jemode_to_cpu(ri->mode);
i_gid_write(inode, je16_to_cpu(ri->gid));
i_uid_write(inode, je16_to_cpu(ri->uid));
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
- ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
+ simple_inode_init_ts(inode);
+ ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode_get_mtime(inode)));
inode->i_blocks = 0;
inode->i_size = 0;
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 50727a1ff931..86ab014a349c 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -36,8 +36,8 @@ struct kvec;
#define JFFS2_NOW() JFFS2_CLAMP_TIME(ktime_get_real_seconds())
#define I_SEC(tv) JFFS2_CLAMP_TIME((tv).tv_sec)
#define JFFS2_F_I_CTIME(f) I_SEC(inode_get_ctime(OFNI_EDONI_2SFFJ(f)))
-#define JFFS2_F_I_MTIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_mtime)
-#define JFFS2_F_I_ATIME(f) I_SEC(OFNI_EDONI_2SFFJ(f)->i_atime)
+#define JFFS2_F_I_MTIME(f) I_SEC(inode_get_mtime(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_ATIME(f) I_SEC(inode_get_atime(OFNI_EDONI_2SFFJ(f)))
#define sleep_on_spinunlock(wq, s) \
do { \
DECLARE_WAITQUEUE(__wait, current); \
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 3b6bdc9a49e1..00224f3a8d6e 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -920,7 +920,7 @@ struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
* do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
* is an implementation of setxattr handler on jffs2.
* -------------------------------------------------- */
-const struct xattr_handler *jffs2_xattr_handlers[] = {
+const struct xattr_handler * const jffs2_xattr_handlers[] = {
&jffs2_user_xattr_handler,
#ifdef CONFIG_JFFS2_FS_SECURITY
&jffs2_security_xattr_handler,
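The handler-table constifications in this series add a second const so that the array of pointers itself, not just the pointed-to handlers, is immutable and can be placed in read-only data. The pattern, with placeholder names:

static const struct xattr_handler my_user_handler = {
	.prefix = "user.",
	/* .get/.set callbacks elided */
};

const struct xattr_handler * const my_xattr_handlers[] = {
	&my_user_handler,
	NULL,			/* sentinel, as the VFS expects */
};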
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 1b5030a3349d..7e7de093ec0a 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -94,7 +94,7 @@ extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname
extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
const char *buffer, size_t size, int flags);
-extern const struct xattr_handler *jffs2_xattr_handlers[];
+extern const struct xattr_handler * const jffs2_xattr_handlers[];
extern const struct xattr_handler jffs2_user_xattr_handler;
extern const struct xattr_handler jffs2_trusted_xattr_handler;
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 920d58a1566b..1a6b5921d17a 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -393,7 +393,7 @@ void jfs_truncate_nolock(struct inode *ip, loff_t length)
break;
}
- ip->i_mtime = inode_set_ctime_current(ip);
+ inode_set_mtime_to_ts(ip, inode_set_ctime_current(ip));
mark_inode_dirty(ip);
txCommit(tid, 1, &ip, 0);
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 923a58422c46..8e87264e56ce 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -3061,10 +3061,10 @@ static int copy_from_dinode(struct dinode * dip, struct inode *ip)
}
ip->i_size = le64_to_cpu(dip->di_size);
- ip->i_atime.tv_sec = le32_to_cpu(dip->di_atime.tv_sec);
- ip->i_atime.tv_nsec = le32_to_cpu(dip->di_atime.tv_nsec);
- ip->i_mtime.tv_sec = le32_to_cpu(dip->di_mtime.tv_sec);
- ip->i_mtime.tv_nsec = le32_to_cpu(dip->di_mtime.tv_nsec);
+ inode_set_atime(ip, le32_to_cpu(dip->di_atime.tv_sec),
+ le32_to_cpu(dip->di_atime.tv_nsec));
+ inode_set_mtime(ip, le32_to_cpu(dip->di_mtime.tv_sec),
+ le32_to_cpu(dip->di_mtime.tv_nsec));
inode_set_ctime(ip, le32_to_cpu(dip->di_ctime.tv_sec),
le32_to_cpu(dip->di_ctime.tv_nsec));
ip->i_blocks = LBLK2PBLK(ip->i_sb, le64_to_cpu(dip->di_nblocks));
@@ -3138,12 +3138,12 @@ static void copy_to_dinode(struct dinode * dip, struct inode *ip)
else /* Leave the original permissions alone */
dip->di_mode = cpu_to_le32(jfs_ip->mode2);
- dip->di_atime.tv_sec = cpu_to_le32(ip->i_atime.tv_sec);
- dip->di_atime.tv_nsec = cpu_to_le32(ip->i_atime.tv_nsec);
- dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime(ip).tv_sec);
- dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime(ip).tv_nsec);
- dip->di_mtime.tv_sec = cpu_to_le32(ip->i_mtime.tv_sec);
- dip->di_mtime.tv_nsec = cpu_to_le32(ip->i_mtime.tv_nsec);
+ dip->di_atime.tv_sec = cpu_to_le32(inode_get_atime_sec(ip));
+ dip->di_atime.tv_nsec = cpu_to_le32(inode_get_atime_nsec(ip));
+ dip->di_ctime.tv_sec = cpu_to_le32(inode_get_ctime_sec(ip));
+ dip->di_ctime.tv_nsec = cpu_to_le32(inode_get_ctime_nsec(ip));
+ dip->di_mtime.tv_sec = cpu_to_le32(inode_get_mtime_sec(ip));
+ dip->di_mtime.tv_nsec = cpu_to_le32(inode_get_mtime_nsec(ip));
dip->di_ixpxd = jfs_ip->ixpxd; /* in-memory pxd's are little-endian */
dip->di_acl = jfs_ip->acl; /* as are dxd's */
dip->di_ea = jfs_ip->ea;
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 87594efa7f7c..f10f295d1502 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -97,8 +97,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
jfs_inode->mode2 |= inode->i_mode;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- jfs_inode->otime = inode_get_ctime(inode).tv_sec;
+ simple_inode_init_ts(inode);
+ jfs_inode->otime = inode_get_ctime_sec(inode);
inode->i_generation = JFS_SBI(sb)->gengen++;
jfs_inode->cflag = 0;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index e855b8fde76c..cb6d1fda66a7 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1058,7 +1058,7 @@ void jfs_syncpt(struct jfs_log *log, int hard_sync)
int lmLogOpen(struct super_block *sb)
{
int rc;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct jfs_log *log;
struct jfs_sb_info *sbi = JFS_SBI(sb);
@@ -1070,7 +1070,7 @@ int lmLogOpen(struct super_block *sb)
mutex_lock(&jfs_log_mutex);
list_for_each_entry(log, &jfs_external_logs, journal_list) {
- if (log->bdev->bd_dev == sbi->logdev) {
+ if (log->bdev_handle->bdev->bd_dev == sbi->logdev) {
if (!uuid_equal(&log->uuid, &sbi->loguuid)) {
jfs_warn("wrong uuid on JFS journal");
mutex_unlock(&jfs_log_mutex);
@@ -1100,14 +1100,14 @@ int lmLogOpen(struct super_block *sb)
* file systems to log may have n-to-1 relationship;
*/
- bdev = blkdev_get_by_dev(sbi->logdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
- log, NULL);
- if (IS_ERR(bdev)) {
- rc = PTR_ERR(bdev);
+ bdev_handle = bdev_open_by_dev(sbi->logdev,
+ BLK_OPEN_READ | BLK_OPEN_WRITE, log, NULL);
+ if (IS_ERR(bdev_handle)) {
+ rc = PTR_ERR(bdev_handle);
goto free;
}
- log->bdev = bdev;
+ log->bdev_handle = bdev_handle;
uuid_copy(&log->uuid, &sbi->loguuid);
/*
@@ -1141,7 +1141,7 @@ journal_found:
lbmLogShutdown(log);
close: /* close external log device */
- blkdev_put(bdev, log);
+ bdev_release(bdev_handle);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
@@ -1162,7 +1162,7 @@ static int open_inline_log(struct super_block *sb)
init_waitqueue_head(&log->syncwait);
set_bit(log_INLINELOG, &log->flag);
- log->bdev = sb->s_bdev;
+ log->bdev_handle = sb->s_bdev_handle;
log->base = addressPXD(&JFS_SBI(sb)->logpxd);
log->size = lengthPXD(&JFS_SBI(sb)->logpxd) >>
(L2LOGPSIZE - sb->s_blocksize_bits);
@@ -1436,7 +1436,7 @@ int lmLogClose(struct super_block *sb)
{
struct jfs_sb_info *sbi = JFS_SBI(sb);
struct jfs_log *log = sbi->log;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
int rc = 0;
jfs_info("lmLogClose: log:0x%p", log);
@@ -1482,10 +1482,10 @@ int lmLogClose(struct super_block *sb)
* external log as separate logical volume
*/
list_del(&log->journal_list);
- bdev = log->bdev;
+ bdev_handle = log->bdev_handle;
rc = lmLogShutdown(log);
- blkdev_put(bdev, log);
+ bdev_release(bdev_handle);
kfree(log);
@@ -1972,7 +1972,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bp->l_flag |= lbmREAD;
- bio = bio_alloc(log->bdev, 1, REQ_OP_READ, GFP_NOFS);
+ bio = bio_alloc(log->bdev_handle->bdev, 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
@@ -2110,10 +2110,15 @@ static void lbmStartIO(struct lbuf * bp)
{
struct bio *bio;
struct jfs_log *log = bp->l_log;
+ struct block_device *bdev = NULL;
jfs_info("lbmStartIO");
- bio = bio_alloc(log->bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_NOFS);
+ if (!log->no_integrity)
+ bdev = log->bdev_handle->bdev;
+
+ bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC,
+ GFP_NOFS);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
__bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
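The jfs log conversion above swaps blkdev_get_by_dev()/blkdev_put() for the bdev_handle API; the underlying block_device is then reached through handle->bdev. A hedged open/use/close sketch (submit_log_io() is a made-up stand-in):

static int open_log_device(dev_t devt, void *holder)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(devt, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	submit_log_io(handle->bdev);	/* I/O targets handle->bdev */

	bdev_release(handle);		/* replaces blkdev_put(bdev, holder) */
	return 0;
}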
diff --git a/fs/jfs/jfs_logmgr.h b/fs/jfs/jfs_logmgr.h
index 805877ce5020..84aa2d253907 100644
--- a/fs/jfs/jfs_logmgr.h
+++ b/fs/jfs/jfs_logmgr.h
@@ -356,7 +356,7 @@ struct jfs_log {
* before writing syncpt.
*/
struct list_head journal_list; /* Global list */
- struct block_device *bdev; /* 4: log lv pointer */
+ struct bdev_handle *bdev_handle; /* 4: log lv pointer */
int serial; /* 4: log mount serial number */
s64 base; /* @8: log extent address (inline log ) */
diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c
index b83aae56a1f2..415eb65a36ff 100644
--- a/fs/jfs/jfs_mount.c
+++ b/fs/jfs/jfs_mount.c
@@ -430,7 +430,8 @@ int updateSuper(struct super_block *sb, uint state)
if (state == FM_MOUNT) {
/* record log's dev_t and mount serial number */
- j_sb->s_logdev = cpu_to_le32(new_encode_dev(sbi->log->bdev->bd_dev));
+ j_sb->s_logdev = cpu_to_le32(
+ new_encode_dev(sbi->log->bdev_handle->bdev->bd_dev));
j_sb->s_logserial = cpu_to_le32(sbi->log->serial);
} else if (state == FM_CLEAN) {
/*
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
index 0d33816d251d..ec67d8554d2c 100644
--- a/fs/jfs/jfs_xattr.h
+++ b/fs/jfs/jfs_xattr.h
@@ -46,7 +46,7 @@ extern int __jfs_setxattr(tid_t, struct inode *, const char *, const void *,
extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t jfs_listxattr(struct dentry *, char *, size_t);
-extern const struct xattr_handler *jfs_xattr_handlers[];
+extern const struct xattr_handler * const jfs_xattr_handlers[];
#ifdef CONFIG_JFS_SECURITY
extern int jfs_init_security(tid_t, struct inode *, struct inode *,
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 57d7a4300210..d68a4e6ac345 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -149,7 +149,7 @@ static int jfs_create(struct mnt_idmap *idmap, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
@@ -284,7 +284,7 @@ static int jfs_mkdir(struct mnt_idmap *idmap, struct inode *dip,
/* update parent directory inode */
inc_nlink(dip); /* for '..' from child directory */
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
rc = txCommit(tid, 2, &iplist[0], 0);
@@ -390,7 +390,7 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
/* update parent directory's link count corresponding
* to ".." entry of the target directory deleted
*/
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
inode_dec_link_count(dip);
/*
@@ -512,7 +512,8 @@ static int jfs_unlink(struct inode *dip, struct dentry *dentry)
ASSERT(ip->i_nlink);
- dip->i_mtime = inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip));
+ inode_set_mtime_to_ts(dip,
+ inode_set_ctime_to_ts(dip, inode_set_ctime_current(ip)));
mark_inode_dirty(dip);
/* update target's inode */
@@ -828,7 +829,7 @@ static int jfs_link(struct dentry *old_dentry,
/* update object inode */
inc_nlink(ip); /* for new link */
inode_set_ctime_current(ip);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ihold(ip);
@@ -1028,7 +1029,7 @@ static int jfs_symlink(struct mnt_idmap *idmap, struct inode *dip,
mark_inode_dirty(ip);
- dip->i_mtime = inode_set_ctime_current(dip);
+ inode_set_mtime_to_ts(dip, inode_set_ctime_current(dip));
mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
@@ -1271,7 +1272,7 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
inode_set_ctime_current(old_ip);
mark_inode_dirty(old_ip);
- new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_mtime_to_ts(new_dir, inode_set_ctime_current(new_dir));
mark_inode_dirty(new_dir);
/* Build list of inodes modified by this transaction */
@@ -1283,7 +1284,8 @@ static int jfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (old_dir != new_dir) {
iplist[ipcount++] = new_dir;
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir,
+ inode_set_ctime_current(old_dir));
mark_inode_dirty(old_dir);
}
@@ -1416,7 +1418,7 @@ static int jfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
mark_inode_dirty(ip);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 2e2f7f6d36a0..966826c394ee 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -818,7 +818,7 @@ out:
}
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
inode_unlock(inode);
return len - towrite;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 8577ad494e05..0fb7afac298e 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -985,7 +985,7 @@ static const struct xattr_handler jfs_trusted_xattr_handler = {
.set = jfs_xattr_set,
};
-const struct xattr_handler *jfs_xattr_handlers[] = {
+const struct xattr_handler * const jfs_xattr_handlers[] = {
&jfs_os2_xattr_handler,
&jfs_user_xattr_handler,
&jfs_security_xattr_handler,
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 922719a343a7..b83054da68b3 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -151,7 +151,7 @@ ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
static inline void set_inode_attr(struct inode *inode,
@@ -159,8 +159,8 @@ static inline void set_inode_attr(struct inode *inode,
{
inode->i_uid = attrs->ia_uid;
inode->i_gid = attrs->ia_gid;
- inode->i_atime = attrs->ia_atime;
- inode->i_mtime = attrs->ia_mtime;
+ inode_set_atime_to_ts(inode, attrs->ia_atime);
+ inode_set_mtime_to_ts(inode, attrs->ia_mtime);
inode_set_ctime_to_ts(inode, attrs->ia_ctime);
}
@@ -445,7 +445,7 @@ static const struct xattr_handler kernfs_user_xattr_handler = {
.set = kernfs_vfs_user_xattr_set,
};
-const struct xattr_handler *kernfs_xattr_handlers[] = {
+const struct xattr_handler * const kernfs_xattr_handlers[] = {
&kernfs_trusted_xattr_handler,
&kernfs_security_xattr_handler,
&kernfs_user_xattr_handler,
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index a9b854cdfdb5..237f2764b941 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -127,7 +127,7 @@ extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache;
/*
* inode.c
*/
-extern const struct xattr_handler *kernfs_xattr_handlers[];
+extern const struct xattr_handler * const kernfs_xattr_handlers[];
void kernfs_evict_inode(struct inode *inode);
int kernfs_iop_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
diff --git a/fs/libfs.c b/fs/libfs.c
index 37f2d34ee090..abe2b5a40ba1 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -541,7 +541,8 @@ void simple_recursive_removal(struct dentry *dentry,
dput(victim); // unpin it
}
if (victim == dentry) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
if (d_is_dir(dentry))
drop_nlink(inode);
inode_unlock(inode);
@@ -582,7 +583,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc)
*/
root->i_ino = 1;
root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
- root->i_atime = root->i_mtime = inode_set_ctime_current(root);
+ simple_inode_init_ts(root);
s->s_root = d_make_root(root);
if (!s->s_root)
return -ENOMEM;
@@ -638,8 +639,8 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
{
struct inode *inode = d_inode(old_dentry);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inc_nlink(inode);
ihold(inode);
dget(dentry);
@@ -673,8 +674,8 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
drop_nlink(inode);
dput(dentry);
return 0;
@@ -709,9 +710,10 @@ void simple_rename_timestamp(struct inode *old_dir, struct dentry *old_dentry,
{
struct inode *newino = d_inode(new_dentry);
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
if (new_dir != old_dir)
- new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_mtime_to_ts(new_dir,
+ inode_set_ctime_current(new_dir));
inode_set_ctime_current(d_inode(old_dentry));
if (newino)
inode_set_ctime_current(newino);
@@ -926,7 +928,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
*/
inode->i_ino = 1;
inode->i_mode = S_IFDIR | 0755;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
set_nlink(inode, 2);
@@ -952,7 +954,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
goto out;
}
inode->i_mode = S_IFREG | files->mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_fop = files->ops;
inode->i_ino = i;
d_add(dentry, inode);
@@ -1520,7 +1522,7 @@ struct inode *alloc_anon_inode(struct super_block *s)
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_flags |= S_PRIVATE;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
return inode;
}
EXPORT_SYMBOL(alloc_anon_inode);
@@ -1912,3 +1914,20 @@ ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
return direct_written + buffered_written;
}
EXPORT_SYMBOL_GPL(direct_write_fallback);
+
+/**
+ * simple_inode_init_ts - initialize the timestamps for a new inode
+ * @inode: inode to be initialized
+ *
+ * When a new inode is created, most filesystems set the timestamps to the
+ * current time. Add a helper to do this.
+ */
+struct timespec64 simple_inode_init_ts(struct inode *inode)
+{
+ struct timespec64 ts = inode_set_ctime_current(inode);
+
+ inode_set_atime_to_ts(inode, ts);
+ inode_set_mtime_to_ts(inode, ts);
+ return ts;
+}
+EXPORT_SYMBOL(simple_inode_init_ts);
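Because simple_inode_init_ts() returns the timestamp it stored, callers that also stamp an on-disk inode can reuse the value directly, as the jffs2_new_inode() hunk earlier does via I_SEC(). A usage sketch with a hypothetical 32-bit on-disk format:

struct my_raw_inode {			/* hypothetical on-disk layout */
	__le32 ctime;
};

static void stamp_new_inode(struct inode *inode, struct my_raw_inode *raw)
{
	struct timespec64 ts = simple_inode_init_ts(inode);

	/* atime == mtime == ctime == ts on a freshly created inode */
	raw->ctime = cpu_to_le32(ts.tv_sec);	/* truncated to 32 bits */
}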
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 6579948070a4..81be07c1d3d1 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -24,7 +24,6 @@
#include <linux/uio.h>
#include <linux/smp.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/inetdevice.h>
@@ -135,11 +134,11 @@ lockd(void *vrqstp)
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
*/
- while (!kthread_should_stop()) {
+ while (!svc_thread_should_stop(rqstp)) {
/* update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nlm_max_connections;
- nlmsvc_retry_blocked();
+ nlmsvc_retry_blocked(rqstp);
svc_recv(rqstp);
}
if (nlmsvc_ops)
@@ -373,7 +372,9 @@ static void lockd_put(void)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
+ svc_get(nlmsvc_serv);
svc_set_num_threads(nlmsvc_serv, NULL, 0);
+ svc_put(nlmsvc_serv);
timer_delete_sync(&nlmsvc_retry);
nlmsvc_serv = NULL;
dprintk("lockd_down: service destroyed\n");
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 43aeba9de55c..2dc10900ad1c 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -30,7 +30,6 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
-#include <linux/kthread.h>
#include <linux/exportfs.h>
#define NLMDBG_FACILITY NLMDBG_SVCLOCK
@@ -481,9 +480,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock, int wait,
struct nlm_cookie *cookie, int reclaim)
{
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct inode *inode = nlmsvc_file_inode(file);
-#endif
struct nlm_block *block = NULL;
int error;
int mode;
@@ -497,7 +494,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_end,
wait);
- if (nlmsvc_file_file(file)->f_op->lock) {
+ if (!exportfs_lock_op_is_async(inode->i_sb->s_export_op)) {
async_block = wait;
wait = 0;
}
@@ -543,6 +540,25 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
+ spin_lock(&nlm_blocked_lock);
+ /*
+ * If this is a lock request for an already pending
+ * lock request we return nlm_lck_blocked without calling
+ * vfs_lock_file() again. Otherwise we have two pending
+ * requests on the underlying ->lock() implementation but
+ * only one nlm_block to be granted by lm_grant().
+ */
+ if (exportfs_lock_op_is_async(inode->i_sb->s_export_op) &&
+ !list_empty(&block->b_list)) {
+ spin_unlock(&nlm_blocked_lock);
+ ret = nlm_lck_blocked;
+ goto out;
+ }
+
+ /* Append to list of blocked */
+ nlmsvc_insert_block_locked(block, NLM_NEVER);
+ spin_unlock(&nlm_blocked_lock);
+
if (!wait)
lock->fl.fl_flags &= ~FL_SLEEP;
mode = lock_to_openmode(&lock->fl);
@@ -552,16 +568,12 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
dprintk("lockd: vfs_lock_file returned %d\n", error);
switch (error) {
case 0:
+ nlmsvc_remove_block(block);
ret = nlm_granted;
goto out;
case -EAGAIN:
- /*
- * If this is a blocking request for an
- * already pending lock request then we need
- * to put it back on lockd's block list
- */
- if (wait)
- break;
+ if (!wait)
+ nlmsvc_remove_block(block);
ret = async_block ? nlm_lck_blocked : nlm_lck_denied;
goto out;
case FILE_LOCK_DEFERRED:
@@ -572,17 +584,16 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
ret = nlmsvc_defer_lock_rqst(rqstp, block);
goto out;
case -EDEADLK:
+ nlmsvc_remove_block(block);
ret = nlm_deadlock;
goto out;
default: /* includes ENOLCK */
+ nlmsvc_remove_block(block);
ret = nlm_lck_denied_nolocks;
goto out;
}
ret = nlm_lck_blocked;
-
- /* Append to list of blocked */
- nlmsvc_insert_block(block, NLM_NEVER);
out:
mutex_unlock(&file->f_mutex);
nlmsvc_release_block(block);
@@ -1020,13 +1031,13 @@ retry_deferred_block(struct nlm_block *block)
* be retransmitted.
*/
void
-nlmsvc_retry_blocked(void)
+nlmsvc_retry_blocked(struct svc_rqst *rqstp)
{
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
struct nlm_block *block;
spin_lock(&nlm_blocked_lock);
- while (!list_empty(&nlm_blocked) && !kthread_should_stop()) {
+ while (!list_empty(&nlm_blocked) && !svc_thread_should_stop(rqstp)) {
block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
if (block->b_when == NLM_NEVER)
diff --git a/fs/locks.c b/fs/locks.c
index 76ad05f8070a..d4e49a990a8d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2264,11 +2264,13 @@ out:
* To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
* locks, the ->lock() interface may return asynchronously, before the lock has
* been granted or denied by the underlying filesystem, if (and only if)
- * lm_grant is set. Callers expecting ->lock() to return asynchronously
- * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
- * the request is for a blocking lock. When ->lock() does return asynchronously,
- * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
- * request completes.
+ * lm_grant is set. Additionally, the EXPORT_OP_ASYNC_LOCK flag needs to
+ * be set in export_operations->flags.
+ *
+ * Callers expecting ->lock() to return asynchronously will only use F_SETLK,
+ * not F_SETLKW; they will set FL_SLEEP if (and only if) the request is for a
+ * blocking lock. When ->lock() does return asynchronously, it must return
+ * FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock request completes.
* If the request is for non-blocking lock the file system should return
* FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
* with the result. If the request timed out the callback routine will return a
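exportfs_lock_op_is_async(), which the svclock.c hunk above now uses instead of probing f_op->lock, is not shown in this section; presumably it is a simple test of the new EXPORT_OP_ASYNC_LOCK bit, along these lines (a sketch, not the verbatim helper):

static inline bool
exportfs_lock_op_is_async(const struct export_operations *export_ops)
{
	return export_ops && (export_ops->flags & EXPORT_OP_ASYNC_LOCK);
}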
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 25c08fbfcb9d..7da66ca184f4 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -251,7 +251,7 @@ struct inode *minix_new_inode(const struct inode *dir, umode_t mode)
}
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = j;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
insert_inode_hash(inode);
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 20f23e6e58ad..62c313fc9a49 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -281,7 +281,7 @@ got_it:
de->inode = inode->i_ino;
}
dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = minix_handle_dirsync(dir);
out_put:
@@ -313,7 +313,7 @@ int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
else
de->inode = 0;
dir_commit_chunk(page, pos, len);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return minix_handle_dirsync(inode);
}
@@ -436,7 +436,7 @@ int minix_set_link(struct minix_dir_entry *de, struct page *page,
else
de->inode = inode->i_ino;
dir_commit_chunk(page, pos, sbi->s_dirsize);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return minix_handle_dirsync(dir);
}
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index df575473c1cc..f8af6c3ae336 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -501,7 +501,8 @@ static struct inode *V1_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime = inode->i_atime = inode_set_ctime(inode, raw_inode->i_time, 0);
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime(inode, raw_inode->i_time, 0)));
inode->i_blocks = 0;
for (i = 0; i < 9; i++)
minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
@@ -538,11 +539,9 @@ static struct inode *V2_minix_iget(struct inode *inode)
i_gid_write(inode, raw_inode->i_gid);
set_nlink(inode, raw_inode->i_nlinks);
inode->i_size = raw_inode->i_size;
- inode->i_mtime.tv_sec = raw_inode->i_mtime;
- inode->i_atime.tv_sec = raw_inode->i_atime;
+ inode_set_mtime(inode, raw_inode->i_mtime, 0);
+ inode_set_atime(inode, raw_inode->i_atime, 0);
inode_set_ctime(inode, raw_inode->i_ctime, 0);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
inode->i_blocks = 0;
for (i = 0; i < 10; i++)
minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
@@ -589,7 +588,7 @@ static struct buffer_head * V1_minix_update_inode(struct inode * inode)
raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
raw_inode->i_nlinks = inode->i_nlink;
raw_inode->i_size = inode->i_size;
- raw_inode->i_time = inode->i_mtime.tv_sec;
+ raw_inode->i_time = inode_get_mtime_sec(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 9; i++)
@@ -616,9 +615,9 @@ static struct buffer_head * V2_minix_update_inode(struct inode * inode)
raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
raw_inode->i_nlinks = inode->i_nlink;
raw_inode->i_size = inode->i_size;
- raw_inode->i_mtime = inode->i_mtime.tv_sec;
- raw_inode->i_atime = inode->i_atime.tv_sec;
- raw_inode->i_ctime = inode_get_ctime(inode).tv_sec;
+ raw_inode->i_mtime = inode_get_mtime_sec(inode);
+ raw_inode->i_atime = inode_get_atime_sec(inode);
+ raw_inode->i_ctime = inode_get_ctime_sec(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
else for (i = 0; i < 10; i++)
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index ce18ae37c29d..dad131e30c05 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -350,7 +350,7 @@ do_indirects:
}
first_whole++;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
}
diff --git a/fs/namei.c b/fs/namei.c
index 567ee547492b..71c13b2990b4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -188,7 +188,7 @@ getname_flags(const char __user *filename, int flags, int *empty)
}
}
- result->refcnt = 1;
+ atomic_set(&result->refcnt, 1);
/* The empty path is special. */
if (unlikely(!len)) {
if (empty)
@@ -249,7 +249,7 @@ getname_kernel(const char * filename)
memcpy((char *)result->name, filename, len);
result->uptr = NULL;
result->aname = NULL;
- result->refcnt = 1;
+ atomic_set(&result->refcnt, 1);
audit_getname(result);
return result;
@@ -261,9 +261,10 @@ void putname(struct filename *name)
if (IS_ERR(name))
return;
- BUG_ON(name->refcnt <= 0);
+ if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
+ return;
- if (--name->refcnt > 0)
+ if (!atomic_dec_and_test(&name->refcnt))
return;
if (name->name != name->iname) {
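The struct filename conversion above turns a plain integer refcount into an atomic_t and downgrades the underflow BUG into a WARN-and-return. The same pattern in isolation, with a placeholder object type:

struct my_obj {
	atomic_t refcnt;
	/* ... payload ... */
};

static void obj_put(struct my_obj *obj)
{
	if (WARN_ON_ONCE(!atomic_read(&obj->refcnt)))
		return;			/* already zero: warn, don't crash */
	if (!atomic_dec_and_test(&obj->refcnt))
		return;			/* other references remain */
	kfree(obj);
}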
@@ -3104,25 +3105,6 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
EXPORT_SYMBOL(unlock_rename);
/**
- * mode_strip_umask - handle vfs umask stripping
- * @dir: parent directory of the new inode
- * @mode: mode of the new inode to be created in @dir
- *
- * Umask stripping depends on whether or not the filesystem supports POSIX
- * ACLs. If the filesystem doesn't support it umask stripping is done directly
- * in here. If the filesystem does support POSIX ACLs umask stripping is
- * deferred until the filesystem calls posix_acl_create().
- *
- * Returns: mode
- */
-static inline umode_t mode_strip_umask(const struct inode *dir, umode_t mode)
-{
- if (!IS_POSIXACL(dir))
- mode &= ~current_umask();
- return mode;
-}
-
-/**
* vfs_prepare_mode - prepare the mode to be used for a new inode
* @idmap: idmap of the mount the inode was found from
* @dir: parent directory of the new inode
@@ -3535,7 +3517,8 @@ static const char *open_last_lookups(struct nameidata *nd,
if (likely(dentry))
goto finish_lookup;
- BUG_ON(nd->flags & LOOKUP_RCU);
+ if (WARN_ON_ONCE(nd->flags & LOOKUP_RCU))
+ return ERR_PTR(-ECHILD);
} else {
/* create side of things */
if (nd->flags & LOOKUP_RCU) {
@@ -3802,7 +3785,10 @@ static struct file *path_openat(struct nameidata *nd,
WARN_ON(1);
error = -EINVAL;
}
- fput(file);
+ if (unlikely(file->f_mode & FMODE_OPENED))
+ fput(file);
+ else
+ release_empty_file(file);
if (error == -EOPENSTALE) {
if (flags & LOOKUP_RCU)
error = -ECHILD;
@@ -4386,11 +4372,9 @@ retry_deleg:
if (!IS_ERR(dentry)) {
/* Why not before? Because we want correct error value */
- if (last.name[last.len])
+ if (last.name[last.len] || d_is_negative(dentry))
goto slashes;
inode = dentry->d_inode;
- if (d_is_negative(dentry))
- goto slashes;
ihold(inode);
error = security_path_unlink(&path, dentry);
if (error)
diff --git a/fs/namespace.c b/fs/namespace.c
index e157efc54023..6bde71735efa 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -330,16 +330,16 @@ static int mnt_is_readonly(struct vfsmount *mnt)
* can determine when writes are able to occur to a filesystem.
*/
/**
- * __mnt_want_write - get write access to a mount without freeze protection
+ * mnt_get_write_access - get write access to a mount without freeze protection
* @m: the mount on which to take a write
*
* This tells the low-level filesystem that a write is about to be performed to
* it, and makes sure that writes are allowed (mnt is read-write) before
* returning success. This operation does not protect against filesystem being
- * frozen. When the write operation is finished, __mnt_drop_write() must be
+ * frozen. When the write operation is finished, mnt_put_write_access() must be
* called. This is effectively a refcount.
*/
-int __mnt_want_write(struct vfsmount *m)
+int mnt_get_write_access(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
int ret = 0;
@@ -386,6 +386,7 @@ int __mnt_want_write(struct vfsmount *m)
return ret;
}
+EXPORT_SYMBOL_GPL(mnt_get_write_access);
/**
* mnt_want_write - get write access to a mount
@@ -401,7 +402,7 @@ int mnt_want_write(struct vfsmount *m)
int ret;
sb_start_write(m->mnt_sb);
- ret = __mnt_want_write(m);
+ ret = mnt_get_write_access(m);
if (ret)
sb_end_write(m->mnt_sb);
return ret;
@@ -409,15 +410,15 @@ int mnt_want_write(struct vfsmount *m)
EXPORT_SYMBOL_GPL(mnt_want_write);
/**
- * __mnt_want_write_file - get write access to a file's mount
+ * mnt_get_write_access_file - get write access to a file's mount
* @file: the file whose mount on which to take a write
*
- * This is like __mnt_want_write, but if the file is already open for writing it
+ * This is like mnt_get_write_access, but if @file is already open for write it
* skips incrementing mnt_writers (since the open file already has a reference)
* and instead only does the check for emergency r/o remounts. This must be
- * paired with __mnt_drop_write_file.
+ * paired with mnt_put_write_access_file.
*/
-int __mnt_want_write_file(struct file *file)
+int mnt_get_write_access_file(struct file *file)
{
if (file->f_mode & FMODE_WRITER) {
/*
@@ -428,7 +429,7 @@ int __mnt_want_write_file(struct file *file)
return -EROFS;
return 0;
}
- return __mnt_want_write(file->f_path.mnt);
+ return mnt_get_write_access(file->f_path.mnt);
}
/**
@@ -445,7 +446,7 @@ int mnt_want_write_file(struct file *file)
int ret;
sb_start_write(file_inode(file)->i_sb);
- ret = __mnt_want_write_file(file);
+ ret = mnt_get_write_access_file(file);
if (ret)
sb_end_write(file_inode(file)->i_sb);
return ret;
@@ -453,19 +454,20 @@ int mnt_want_write_file(struct file *file)
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
- * __mnt_drop_write - give up write access to a mount
+ * mnt_put_write_access - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done
* performing writes to it. Must be matched with
- * __mnt_want_write() call above.
+ * mnt_get_write_access() call above.
*/
-void __mnt_drop_write(struct vfsmount *mnt)
+void mnt_put_write_access(struct vfsmount *mnt)
{
preempt_disable();
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
+EXPORT_SYMBOL_GPL(mnt_put_write_access);
/**
* mnt_drop_write - give up write access to a mount
@@ -477,20 +479,20 @@ void __mnt_drop_write(struct vfsmount *mnt)
*/
void mnt_drop_write(struct vfsmount *mnt)
{
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
-void __mnt_drop_write_file(struct file *file)
+void mnt_put_write_access_file(struct file *file)
{
if (!(file->f_mode & FMODE_WRITER))
- __mnt_drop_write(file->f_path.mnt);
+ mnt_put_write_access(file->f_path.mnt);
}
void mnt_drop_write_file(struct file *file)
{
- __mnt_drop_write_file(file);
+ mnt_put_write_access_file(file);
sb_end_write(file_inode(file)->i_sb);
}
EXPORT_SYMBOL(mnt_drop_write_file);
@@ -1344,9 +1346,9 @@ void mntput(struct vfsmount *mnt)
{
if (mnt) {
struct mount *m = real_mount(mnt);
- /* avoid cacheline pingpong, hope gcc doesn't get "smart" */
+ /* avoid cacheline pingpong */
if (unlikely(m->mnt_expiry_mark))
- m->mnt_expiry_mark = 0;
+ WRITE_ONCE(m->mnt_expiry_mark, 0);
mntput_no_expire(m);
}
}
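
The plain store becomes WRITE_ONCE() so the compiler can neither tear nor elide a store that other CPUs observe locklessly. A minimal sketch of the paired accessors; the READ_ONCE() on the read side is an editorial assumption, the hunk above keeps a plain read:

static void example_clear_expiry(struct mount *m)
{
	/* Lock-free readers of the flag would use READ_ONCE(). */
	if (READ_ONCE(m->mnt_expiry_mark))
		WRITE_ONCE(m->mnt_expiry_mark, 0);
}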
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 716bc75e9ed2..b4294a8aa2d4 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -108,7 +108,7 @@ struct pnfs_block_dev {
struct pnfs_block_dev *children;
u64 chunk_size;
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
u64 disk_offset;
u64 pr_key;
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 65cbb5607a5f..f318a05a80e1 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -25,17 +25,17 @@ bl_free_device(struct pnfs_block_dev *dev)
} else {
if (dev->pr_registered) {
const struct pr_ops *ops =
- dev->bdev->bd_disk->fops->pr_ops;
+ dev->bdev_handle->bdev->bd_disk->fops->pr_ops;
int error;
- error = ops->pr_register(dev->bdev, dev->pr_key, 0,
- false);
+ error = ops->pr_register(dev->bdev_handle->bdev,
+ dev->pr_key, 0, false);
if (error)
pr_err("failed to unregister PR key.\n");
}
- if (dev->bdev)
- blkdev_put(dev->bdev, NULL);
+ if (dev->bdev_handle)
+ bdev_release(dev->bdev_handle);
}
}
@@ -169,7 +169,7 @@ static bool bl_map_simple(struct pnfs_block_dev *dev, u64 offset,
map->start = dev->start;
map->len = dev->len;
map->disk_offset = dev->disk_offset;
- map->bdev = dev->bdev;
+ map->bdev = dev->bdev_handle->bdev;
return true;
}
@@ -236,28 +236,26 @@ bl_parse_simple(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
dev_t dev;
dev = bl_resolve_deviceid(server, v, gfp_mask);
if (!dev)
return -EIO;
- bdev = blkdev_get_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL,
- NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
printk(KERN_WARNING "pNFS: failed to open device %d:%d (%ld)\n",
- MAJOR(dev), MINOR(dev), PTR_ERR(bdev));
- return PTR_ERR(bdev);
+ MAJOR(dev), MINOR(dev), PTR_ERR(bdev_handle));
+ return PTR_ERR(bdev_handle);
}
- d->bdev = bdev;
-
-
- d->len = bdev_nr_bytes(d->bdev);
+ d->bdev_handle = bdev_handle;
+ d->len = bdev_nr_bytes(bdev_handle->bdev);
d->map = bl_map_simple;
printk(KERN_INFO "pNFS: using block device %s\n",
- d->bdev->bd_disk->disk_name);
+ bdev_handle->bdev->bd_disk->disk_name);
return 0;
}
@@ -302,10 +300,10 @@ bl_validate_designator(struct pnfs_block_volume *v)
}
}
-static struct block_device *
+static struct bdev_handle *
bl_open_path(struct pnfs_block_volume *v, const char *prefix)
{
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
const char *devname;
devname = kasprintf(GFP_KERNEL, "/dev/disk/by-id/%s%*phN",
@@ -313,15 +311,15 @@ bl_open_path(struct pnfs_block_volume *v, const char *prefix)
if (!devname)
return ERR_PTR(-ENOMEM);
- bdev = blkdev_get_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE, NULL,
- NULL);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_path(devname, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ NULL, NULL);
+ if (IS_ERR(bdev_handle)) {
pr_warn("pNFS: failed to open device %s (%ld)\n",
- devname, PTR_ERR(bdev));
+ devname, PTR_ERR(bdev_handle));
}
kfree(devname);
- return bdev;
+ return bdev_handle;
}
static int
@@ -329,7 +327,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
{
struct pnfs_block_volume *v = &volumes[idx];
- struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
const struct pr_ops *ops;
int error;
@@ -342,32 +340,32 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
* On other distributions like Debian, the default SCSI by-id path will
* point to the dm-multipath device if one exists.
*/
- bdev = bl_open_path(v, "dm-uuid-mpath-0x");
- if (IS_ERR(bdev))
- bdev = bl_open_path(v, "wwn-0x");
- if (IS_ERR(bdev))
- return PTR_ERR(bdev);
- d->bdev = bdev;
-
- d->len = bdev_nr_bytes(d->bdev);
+ bdev_handle = bl_open_path(v, "dm-uuid-mpath-0x");
+ if (IS_ERR(bdev_handle))
+ bdev_handle = bl_open_path(v, "wwn-0x");
+ if (IS_ERR(bdev_handle))
+ return PTR_ERR(bdev_handle);
+ d->bdev_handle = bdev_handle;
+
+ d->len = bdev_nr_bytes(d->bdev_handle->bdev);
d->map = bl_map_simple;
d->pr_key = v->scsi.pr_key;
pr_info("pNFS: using block device %s (reservation key 0x%llx)\n",
- d->bdev->bd_disk->disk_name, d->pr_key);
+ d->bdev_handle->bdev->bd_disk->disk_name, d->pr_key);
- ops = d->bdev->bd_disk->fops->pr_ops;
+ ops = d->bdev_handle->bdev->bd_disk->fops->pr_ops;
if (!ops) {
pr_err("pNFS: block device %s does not support reservations.",
- d->bdev->bd_disk->disk_name);
+ d->bdev_handle->bdev->bd_disk->disk_name);
error = -EINVAL;
goto out_blkdev_put;
}
- error = ops->pr_register(d->bdev, 0, d->pr_key, true);
+ error = ops->pr_register(d->bdev_handle->bdev, 0, d->pr_key, true);
if (error) {
pr_err("pNFS: failed to register key for block device %s.",
- d->bdev->bd_disk->disk_name);
+ d->bdev_handle->bdev->bd_disk->disk_name);
goto out_blkdev_put;
}
@@ -375,7 +373,7 @@ bl_parse_scsi(struct nfs_server *server, struct pnfs_block_dev *d,
return 0;
out_blkdev_put:
- blkdev_put(d->bdev, NULL);
+ bdev_release(d->bdev_handle);
return error;
}
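
The hunks above convert the block layout code from bare struct block_device pointers to the struct bdev_handle wrapper introduced in this cycle: bdev_open_by_dev()/bdev_open_by_path() return a handle, the raw device sits in handle->bdev, and bdev_release() replaces blkdev_put(). A hedged sketch of the pattern; the example_* functions are hypothetical:

static struct bdev_handle *example_open(dev_t dev)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  NULL, NULL);	/* holder, holder ops */
	if (IS_ERR(handle))
		return handle;
	pr_info("opened %pg\n", handle->bdev);	/* raw block_device */
	return handle;
}

static void example_close(struct bdev_handle *handle)
{
	bdev_release(handle);	/* replaces blkdev_put(bdev, holder) */
}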
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 466ebf1d41b2..4ffa1f469e90 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -78,7 +78,7 @@ nfs4_callback_svc(void *vrqstp)
set_freezable();
- while (!kthread_freezable_should_stop(NULL))
+ while (!svc_thread_should_stop(rqstp))
svc_recv(rqstp);
svc_exit_thread(rqstp);
@@ -86,45 +86,6 @@ nfs4_callback_svc(void *vrqstp)
}
#if defined(CONFIG_NFS_V4_1)
-/*
- * The callback service for NFSv4.1 callbacks
- */
-static int
-nfs41_callback_svc(void *vrqstp)
-{
- struct svc_rqst *rqstp = vrqstp;
- struct svc_serv *serv = rqstp->rq_server;
- struct rpc_rqst *req;
- int error;
- DEFINE_WAIT(wq);
-
- set_freezable();
-
- while (!kthread_freezable_should_stop(NULL)) {
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
- spin_lock_bh(&serv->sv_cb_lock);
- if (!list_empty(&serv->sv_cb_list)) {
- req = list_first_entry(&serv->sv_cb_list,
- struct rpc_rqst, rq_bc_list);
- list_del(&req->rq_bc_list);
- spin_unlock_bh(&serv->sv_cb_lock);
- finish_wait(&serv->sv_cb_waitq, &wq);
- dprintk("Invoking bc_svc_process()\n");
- error = bc_svc_process(serv, req, rqstp);
- dprintk("bc_svc_process() returned w/ error code= %d\n",
- error);
- } else {
- spin_unlock_bh(&serv->sv_cb_lock);
- if (!kthread_should_stop())
- schedule();
- finish_wait(&serv->sv_cb_waitq, &wq);
- }
- }
-
- svc_exit_thread(rqstp);
- return 0;
-}
-
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct svc_serv *serv)
{
@@ -237,10 +198,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
cb_info->users);
threadfn = nfs4_callback_svc;
-#if defined(CONFIG_NFS_V4_1)
- if (minorversion)
- threadfn = nfs41_callback_svc;
-#else
+#if !defined(CONFIG_NFS_V4_1)
if (minorversion)
return ERR_PTR(-ENOTSUPP);
#endif
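
nfs41_callback_svc can be deleted because, as of this same series, backchannel processing moved into svc_recv() itself, so both minor versions share nfs4_callback_svc; svc_thread_should_stop() replaces the raw kthread test. The resulting thread body, condensed as a sketch:

static int example_callback_svc(void *vrqstp)
{
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();
	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);	/* now also dequeues backchannel work */
	svc_exit_thread(rqstp);
	return 0;
}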
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 6bed1394d748..96a4923080ae 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -60,7 +60,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
if (nfs_have_writebacks(inode))
res->change_attr++;
res->ctime = inode_get_ctime(inode);
- res->mtime = inode->i_mtime;
+ res->mtime = inode_get_mtime(inode);
res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
args->bitmap[0];
res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index a1dc33864906..ef817a0475ff 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -2520,9 +2520,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
return i;
}
-static int
-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
+ struct pnfs_layout_hdr *lo;
struct nfs4_flexfile_layout *ff_layout;
const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
@@ -2533,11 +2533,14 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
return -ENOMEM;
spin_lock(&args->inode->i_lock);
- ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
- args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
- &args->devinfo[0],
- dev_count,
- NFS4_FF_OP_LAYOUTSTATS);
+ lo = NFS_I(args->inode)->layout;
+ if (lo && pnfs_layout_is_valid(lo)) {
+ ff_layout = FF_LAYOUT_FROM_HDR(lo);
+ args->num_dev = ff_layout_mirror_prepare_stats(
+ &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
+ NFS4_FF_OP_LAYOUTSTATS);
+ } else
+ args->num_dev = 0;
spin_unlock(&args->inode->i_lock);
if (!args->num_dev) {
kfree(args->devinfo);
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 2dc64454492b..5407ab8c8783 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -114,8 +114,8 @@ static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *
struct inode *inode)
{
memset(auxdata, 0, sizeof(*auxdata));
- auxdata->mtime_sec = inode->i_mtime.tv_sec;
- auxdata->mtime_nsec = inode->i_mtime.tv_nsec;
+ auxdata->mtime_sec = inode_get_mtime(inode).tv_sec;
+ auxdata->mtime_nsec = inode_get_mtime(inode).tv_nsec;
auxdata->ctime_sec = inode_get_ctime(inode).tv_sec;
auxdata->ctime_nsec = inode_get_ctime(inode).tv_nsec;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e21c073158e5..ebb8d60e1152 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -512,8 +512,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
} else
init_special_inode(inode, inode->i_mode, fattr->rdev);
- memset(&inode->i_atime, 0, sizeof(inode->i_atime));
- memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
+ inode_set_atime(inode, 0, 0);
+ inode_set_mtime(inode, 0, 0);
inode_set_ctime(inode, 0, 0);
inode_set_iversion_raw(inode, 0);
inode->i_size = 0;
@@ -527,11 +527,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
nfsi->read_cache_jiffies = fattr->time_start;
nfsi->attr_gencount = fattr->gencount;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
@@ -742,9 +742,9 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_ATIME
| NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (attr->ia_valid & ATTR_ATIME_SET)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
@@ -758,9 +758,9 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_MTIME
| NFS_INO_INVALID_CTIME);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (attr->ia_valid & ATTR_MTIME_SET)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
@@ -1451,11 +1451,11 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode_set_ctime_to_ts(inode, fattr->ctime);
}
- ts = inode->i_mtime;
+ ts = inode_get_mtime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
&& timespec64_equal(&ts, &fattr->pre_mtime)) {
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
}
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
@@ -1506,7 +1506,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
invalid |= NFS_INO_INVALID_CHANGE;
- ts = inode->i_mtime;
+ ts = inode_get_mtime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec64_equal(&ts, &fattr->mtime))
invalid |= NFS_INO_INVALID_MTIME;
@@ -1534,7 +1534,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
invalid |= NFS_INO_INVALID_NLINK;
- ts = inode->i_atime;
+ ts = inode_get_atime(inode);
if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec64_equal(&ts, &fattr->atime))
invalid |= NFS_INO_INVALID_ATIME;
@@ -2002,7 +2002,7 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
}
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
- fattr->pre_mtime = inode->i_mtime;
+ fattr->pre_mtime = inode_get_mtime(inode);
fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
@@ -2184,7 +2184,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
- inode->i_mtime = fattr->mtime;
+ inode_set_mtime_to_ts(inode, fattr->mtime);
else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_MTIME;
@@ -2220,7 +2220,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
save_cache_validity & NFS_INO_INVALID_SIZE;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
- inode->i_atime = fattr->atime;
+ inode_set_atime_to_ts(inode, fattr->atime);
else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
nfsi->cache_validity |=
save_cache_validity & NFS_INO_INVALID_ATIME;
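
These hunks are part of the tree-wide conversion to timestamp accessors: i_mtime/i_atime are no longer dereferenced directly but read with inode_get_mtime()/inode_get_atime() and written with inode_set_mtime()/inode_set_mtime_to_ts() (plus the _sec helpers used by the nfsd hunks below). A hedged sketch of the accessor family:

static void example_timestamps(struct inode *inode, struct timespec64 ts)
{
	struct timespec64 mtime = inode_get_mtime(inode);

	if (!timespec64_equal(&mtime, &ts))
		inode_set_mtime_to_ts(inode, ts);	/* takes ts by value */
	inode_set_atime(inode, 0, 0);			/* seconds, nanoseconds */
	if (inode_get_mtime_sec(inode) == 0)
		inode_set_mtime(inode, ts.tv_sec, ts.tv_nsec);
}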
diff --git a/fs/nfs/nfs.h b/fs/nfs/nfs.h
index 5ba00610aede..0d3ce0460e35 100644
--- a/fs/nfs/nfs.h
+++ b/fs/nfs/nfs.h
@@ -18,7 +18,7 @@ struct nfs_subversion {
const struct rpc_version *rpc_vers; /* NFS version information */
const struct nfs_rpc_ops *rpc_ops; /* NFS operations */
const struct super_operations *sops; /* NFS Super operations */
- const struct xattr_handler **xattr; /* NFS xattr handlers */
+ const struct xattr_handler * const *xattr; /* NFS xattr handlers */
struct list_head list; /* List of NFS versions */
};
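
The handler tables become const struct xattr_handler * const *, i.e. both the array and the handlers it points at are immutable, so the table can live in .rodata. A sketch of the resulting shape; example_handler is hypothetical:

static const struct xattr_handler example_handler = {
	.prefix	= "user.",
	/* .get and .set callbacks omitted in this sketch */
};

static const struct xattr_handler * const example_handlers[] = {
	&example_handler,
	NULL,			/* tables are NULL-terminated */
};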
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 063e00aff87e..28704f924612 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -81,7 +81,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
if (status == 0) {
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ nfs_set_cache_invalid(inode,
+ NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
spin_unlock(&inode->i_lock);
}
status = nfs_post_op_update_inode_force_wcc(inode,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 47c5c1f86d66..827d00e2f094 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -315,7 +315,7 @@ extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *,
struct nfs_fh *,
struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
-extern const struct xattr_handler *nfs4_xattr_handlers[];
+extern const struct xattr_handler * const nfs4_xattr_handlers[];
extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_open_context *ctx,
const struct nfs_lock_context *l_ctx,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7016eaadf555..a654d7234f51 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -8870,8 +8870,6 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
/* Save the EXCHANGE_ID verifier session trunk tests */
memcpy(clp->cl_confirm.data, argp->verifier.data,
sizeof(clp->cl_confirm.data));
- if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
- set_bit(NFS_CS_DS, &clp->cl_flags);
out:
trace_nfs4_exchange_id(clp, status);
rpc_put_task(task);
@@ -10739,7 +10737,7 @@ static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
};
#endif
-const struct xattr_handler *nfs4_xattr_handlers[] = {
+const struct xattr_handler * const nfs4_xattr_handlers[] = {
&nfs4_xattr_nfs4_acl_handler,
#if defined(CONFIG_NFS_V4_1)
&nfs4_xattr_nfs4_dacl_handler,
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 306cba0b9e69..84343aefbbd6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2634,31 +2634,44 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
return mode == 0;
}
-static int
-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
+static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
+ void *data)
{
const struct pnfs_layout_range *range = data;
+ const struct cred *cred;
struct pnfs_layout_hdr *lo;
struct inode *inode;
+ nfs4_stateid stateid;
+ enum pnfs_iomode iomode;
+
restart:
rcu_read_lock();
list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
- if (!pnfs_layout_can_be_returned(lo) ||
+ inode = lo->plh_inode;
+ if (!inode || !pnfs_layout_can_be_returned(lo) ||
test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
continue;
- inode = lo->plh_inode;
spin_lock(&inode->i_lock);
- if (!pnfs_should_return_unused_layout(lo, range)) {
+ if (!lo->plh_inode ||
+ !pnfs_should_return_unused_layout(lo, range)) {
spin_unlock(&inode->i_lock);
continue;
}
+ pnfs_get_layout_hdr(lo);
+ pnfs_set_plh_return_info(lo, range->iomode, 0);
+ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ range, 0) != 0 ||
+ !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+ pnfs_put_layout_hdr(lo);
+ cond_resched();
+ goto restart;
+ }
spin_unlock(&inode->i_lock);
- inode = pnfs_grab_inode_layout_hdr(lo);
- if (!inode)
- continue;
rcu_read_unlock();
- pnfs_mark_layout_for_return(inode, range);
- iput(inode);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
+ pnfs_put_layout_hdr(lo);
cond_resched();
goto restart;
}
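
The rework sends the layoutreturn directly rather than bouncing through pnfs_mark_layout_for_return(), and it follows the standard recipe for doing blocking work found during an RCU list walk: pin the object, leave the RCU section, act, then restart because the list may have changed. A condensed sketch with hypothetical helpers:

static void example_walk(struct nfs_server *server)
{
	struct pnfs_layout_hdr *lo;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
		if (!example_needs_return(lo))	/* hypothetical predicate */
			continue;
		pnfs_get_layout_hdr(lo);	/* pin before leaving RCU */
		rcu_read_unlock();
		example_send_return(lo);	/* may sleep; hypothetical */
		pnfs_put_layout_hdr(lo);
		cond_resched();
		goto restart;			/* list may have mutated */
	}
	rcu_read_unlock();
}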
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0d6473cb00cb..9b1cfca8112a 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1071,7 +1071,7 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
sb->s_export_op = &nfs_export_ops;
break;
case 4:
- sb->s_flags |= SB_POSIXACL;
+ sb->s_iflags |= SB_I_NOUMASK;
sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7720b5e43014..9d82d50ce0b1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -788,6 +788,8 @@ static void nfs_inode_add_request(struct nfs_page *req)
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
+ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio_file_mapping(folio);
@@ -802,7 +804,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
- atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
+ atomic_long_dec(&nfsi->nrequests);
nfs_release_request(req);
}
}
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 6fffc8f03f74..b8736a82e57c 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -12,7 +12,8 @@ nfsd-y += trace.o
nfsd-y += nfssvc.o nfsctl.o nfsfh.o vfs.o \
export.o auth.o lockd.o nfscache.o \
- stats.o filecache.o nfs3proc.o nfs3xdr.o
+ stats.o filecache.o nfs3proc.o nfs3xdr.o \
+ netlink.o
nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o
nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o
nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 01d7fd108cf3..46fd74d91ea9 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -117,12 +117,13 @@ static __be32
nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
struct iomap *iomaps, int nr_iomaps)
{
+ struct timespec64 mtime = inode_get_mtime(inode);
loff_t new_size = lcp->lc_last_wr + 1;
struct iattr iattr = { .ia_valid = 0 };
int error;
if (lcp->lc_mtime.tv_nsec == UTIME_NOW ||
- timespec64_compare(&lcp->lc_mtime, &inode->i_mtime) < 0)
+ timespec64_compare(&lcp->lc_mtime, &mtime) < 0)
lcp->lc_mtime = current_time(inode);
iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 1ed2f691ebb9..ce78f74715ee 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -16,9 +16,9 @@
__be32
nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp)
+ const struct nfsd4_layoutget *lgp)
{
- struct pnfs_block_extent *b = lgp->lg_content;
+ const struct pnfs_block_extent *b = lgp->lg_content;
int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32);
__be32 *p;
@@ -77,7 +77,7 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
__be32
nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp)
+ const struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_block_deviceaddr *dev = gdp->gd_device;
int len = sizeof(__be32), ret, i;
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index bc5166bfe46b..b0361e8aa9a7 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -51,9 +51,9 @@ struct pnfs_block_deviceaddr {
};
__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp);
+ const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp);
+ const struct nfsd4_layoutget *lgp);
int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
u32 block_size);
int nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 11a0eaa2f914..b7da17e53007 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -339,12 +339,16 @@ static int export_stats_init(struct export_stats *stats)
static void export_stats_reset(struct export_stats *stats)
{
- nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
+ if (stats)
+ nfsd_percpu_counters_reset(stats->counter,
+ EXP_STATS_COUNTERS_NUM);
}
static void export_stats_destroy(struct export_stats *stats)
{
- nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
+ if (stats)
+ nfsd_percpu_counters_destroy(stats->counter,
+ EXP_STATS_COUNTERS_NUM);
}
static void svc_export_put(struct kref *ref)
@@ -353,7 +357,8 @@ static void svc_export_put(struct kref *ref)
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
- export_stats_destroy(&exp->ex_stats);
+ export_stats_destroy(exp->ex_stats);
+ kfree(exp->ex_stats);
kfree(exp->ex_uuid);
kfree_rcu(exp, ex_rcu);
}
@@ -767,13 +772,15 @@ static int svc_export_show(struct seq_file *m,
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
if (export_stats) {
- seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
+ struct percpu_counter *counter = exp->ex_stats->counter;
+
+ seq_printf(m, "\t%lld\n", exp->ex_stats->start_time);
seq_printf(m, "\tfh_stale: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE]));
seq_printf(m, "\tio_read: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ]));
seq_printf(m, "\tio_write: %lld\n",
- percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
+ percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE]));
seq_putc(m, '\n');
return 0;
}
@@ -819,7 +826,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
- export_stats_reset(&new->ex_stats);
+ export_stats_reset(new->ex_stats);
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -856,7 +863,14 @@ static struct cache_head *svc_export_alloc(void)
if (!i)
return NULL;
- if (export_stats_init(&i->ex_stats)) {
+ i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL);
+ if (!i->ex_stats) {
+ kfree(i);
+ return NULL;
+ }
+
+ if (export_stats_init(i->ex_stats)) {
+ kfree(i->ex_stats);
kfree(i);
return NULL;
}
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 2df8ae25aad3..ca9dc230ae3d 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -64,10 +64,10 @@ struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
int ex_flags;
+ int ex_fsid;
struct path ex_path;
kuid_t ex_anon_uid;
kgid_t ex_anon_gid;
- int ex_fsid;
unsigned char * ex_uuid; /* 16 byte fsid */
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
@@ -76,8 +76,8 @@ struct svc_export {
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
struct rcu_head ex_rcu;
- struct export_stats ex_stats;
unsigned long ex_xprtsec_modes;
+ struct export_stats *ex_stats;
};
/* an "export key" (expkey) maps a filehandlefragement to an
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index ee9c923192e0..07bf219f9ae4 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -989,22 +989,21 @@ nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned char need = may_flags & NFSD_FILE_MAY_MASK;
struct net *net = SVC_NET(rqstp);
struct nfsd_file *new, *nf;
- const struct cred *cred;
+ bool stale_retry = true;
bool open_retry = true;
struct inode *inode;
__be32 status;
int ret;
+retry:
status = fh_verify(rqstp, fhp, S_IFREG,
may_flags|NFSD_MAY_OWNER_OVERRIDE);
if (status != nfs_ok)
return status;
inode = d_inode(fhp->fh_dentry);
- cred = get_current_cred();
-retry:
rcu_read_lock();
- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
rcu_read_unlock();
if (nf) {
@@ -1026,7 +1025,7 @@ retry:
rcu_read_lock();
spin_lock(&inode->i_lock);
- nf = nfsd_file_lookup_locked(net, cred, inode, need, want_gc);
+ nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
if (unlikely(nf)) {
spin_unlock(&inode->i_lock);
rcu_read_unlock();
@@ -1058,6 +1057,7 @@ wait_for_construction:
goto construction_err;
}
open_retry = false;
+ fh_put(fhp);
goto retry;
}
this_cpu_inc(nfsd_file_cache_hits);
@@ -1074,7 +1074,6 @@ out:
nfsd_file_check_write_error(nf);
*pnf = nf;
}
- put_cred(cred);
trace_nfsd_file_acquire(rqstp, inode, may_flags, nf, status);
return status;
@@ -1088,8 +1087,20 @@ open_file:
status = nfs_ok;
trace_nfsd_file_opened(nf, status);
} else {
- status = nfsd_open_verified(rqstp, fhp, may_flags,
- &nf->nf_file);
+ ret = nfsd_open_verified(rqstp, fhp, may_flags,
+ &nf->nf_file);
+ if (ret == -EOPENSTALE && stale_retry) {
+ stale_retry = false;
+ nfsd_file_unhash(nf);
+ clear_and_wake_up_bit(NFSD_FILE_PENDING,
+ &nf->nf_flags);
+ if (refcount_dec_and_test(&nf->nf_ref))
+ nfsd_file_free(nf);
+ nf = NULL;
+ fh_put(fhp);
+ goto retry;
+ }
+ status = nfserrno(ret);
trace_nfsd_file_open(nf, status);
}
} else
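
The acquire path now retries exactly once when the VFS open fails with -EOPENSTALE: the half-constructed nfsd_file is unhashed and freed, and the file handle is dropped so the restarted fh_verify() produces a fresh dentry. A simplified, hypothetical sketch of just that control flow:

static __be32 example_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
			      unsigned int may_flags, struct file **filp)
{
	bool stale_retry = true;
	__be32 status;
	int ret;

retry:
	status = fh_verify(rqstp, fhp, S_IFREG, may_flags);
	if (status != nfs_ok)
		return status;
	ret = nfsd_open_verified(rqstp, fhp, may_flags, filp);
	if (ret == -EOPENSTALE && stale_retry) {
		stale_retry = false;	/* at most one stale retry */
		fh_put(fhp);		/* force a fresh lookup above */
		goto retry;
	}
	return nfserrno(ret);
}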
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index bb205328e043..aeb71c10ff1b 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -17,9 +17,9 @@ struct ff_idmap {
__be32
nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp)
+ const struct nfsd4_layoutget *lgp)
{
- struct pnfs_ff_layout *fl = lgp->lg_content;
+ const struct pnfs_ff_layout *fl = lgp->lg_content;
int len, mirror_len, ds_len, fh_len;
__be32 *p;
@@ -77,7 +77,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
__be32
nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp)
+ const struct nfsd4_getdeviceinfo *gdp)
{
struct pnfs_ff_device_addr *da = gdp->gd_device;
int len;
diff --git a/fs/nfsd/flexfilelayoutxdr.h b/fs/nfsd/flexfilelayoutxdr.h
index 8e195aeca023..6d5a1066a903 100644
--- a/fs/nfsd/flexfilelayoutxdr.h
+++ b/fs/nfsd/flexfilelayoutxdr.h
@@ -43,8 +43,8 @@ struct pnfs_ff_layout {
};
__be32 nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdp);
+ const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
- struct nfsd4_layoutget *lgp);
+ const struct nfsd4_layoutget *lgp);
#endif /* _NFSD_FLEXFILELAYOUTXDR_H */
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
new file mode 100644
index 000000000000..0e1d635ec5f9
--- /dev/null
+++ b/fs/nfsd/netlink.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/nfsd_netlink.h>
+
+/* Ops table for nfsd */
+static const struct genl_split_ops nfsd_nl_ops[] = {
+ {
+ .cmd = NFSD_CMD_RPC_STATUS_GET,
+ .start = nfsd_nl_rpc_status_get_start,
+ .dumpit = nfsd_nl_rpc_status_get_dumpit,
+ .done = nfsd_nl_rpc_status_get_done,
+ .flags = GENL_CMD_CAP_DUMP,
+ },
+};
+
+struct genl_family nfsd_nl_family __ro_after_init = {
+ .name = NFSD_FAMILY_NAME,
+ .version = NFSD_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = nfsd_nl_ops,
+ .n_split_ops = ARRAY_SIZE(nfsd_nl_ops),
+};
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
new file mode 100644
index 000000000000..d83dd6bdee92
--- /dev/null
+++ b/fs/nfsd/netlink.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_NFSD_GEN_H
+#define _LINUX_NFSD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/nfsd_netlink.h>
+
+int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb);
+int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb);
+
+int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+extern struct genl_family nfsd_nl_family;
+
+#endif /* _LINUX_NFSD_GEN_H */
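
netlink.c and netlink.h are YNL-generated scaffolding; the family still has to be registered once at module load, which the rest of the series wires up elsewhere in nfsd. A hedged sketch of that step:

static int __init example_nfsd_nl_init(void)
{
	return genl_register_family(&nfsd_nl_family);
}

static void __exit example_nfsd_nl_exit(void)
{
	genl_unregister_family(&nfsd_nl_family);
}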
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 268ef57751c4..b78eceebd945 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -171,7 +171,8 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
* + 1 (xdr opaque byte count) = 26
*/
resp->count = argp->count;
- svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
+ svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3) << 2) +
+ resp->count + 4);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
@@ -194,7 +195,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
SVCFH_fmt(&argp->fh),
argp->len,
(unsigned long long) argp->offset,
- argp->stable? " stable" : "");
+ argp->stable ? " stable" : "");
resp->status = nfserr_fbig;
if (argp->offset > (u64)OFFSET_MAX ||
@@ -294,8 +295,8 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = nfserr_exist;
break;
case NFS3_CREATE_EXCLUSIVE:
- if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
- d_inode(child)->i_atime.tv_sec == v_atime &&
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
d_inode(child)->i_size == 0) {
break;
}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 4039ffcf90ba..92bc109dabe6 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -84,7 +84,21 @@ static void encode_uint32(struct xdr_stream *xdr, u32 n)
static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap,
size_t len)
{
- WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0);
+ xdr_stream_encode_uint32_array(xdr, bitmap, len);
+}
+
+static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs4_cb_fattr *fattr)
+{
+ fattr->ncf_cb_change = 0;
+ fattr->ncf_cb_fsize = 0;
+ if (bitmap[0] & FATTR4_WORD0_CHANGE)
+ if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
+ return -NFSERR_BAD_XDR;
+ if (bitmap[0] & FATTR4_WORD0_SIZE)
+ if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
+ return -NFSERR_BAD_XDR;
+ return 0;
}
/*
@@ -358,6 +372,30 @@ encode_cb_recallany4args(struct xdr_stream *xdr,
}
/*
+ * CB_GETATTR4args
+ * struct CB_GETATTR4args {
+ * nfs_fh4 fh;
+ * bitmap4 attr_request;
+ * };
+ *
+ * The size and change attributes are the only ones
+ * guaranteed to be serviced by the client.
+ */
+static void
+encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr,
+ struct nfs4_cb_fattr *fattr)
+{
+ struct nfs4_delegation *dp =
+ container_of(fattr, struct nfs4_delegation, dl_cb_fattr);
+ struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle;
+
+ encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR);
+ encode_nfs_fh4(xdr, fh);
+ encode_bitmap4(xdr, fattr->ncf_cb_bmap, ARRAY_SIZE(fattr->ncf_cb_bmap));
+ hdr->nops++;
+}
+
+/*
* CB_SEQUENCE4args
*
* struct CB_SEQUENCE4args {
@@ -493,6 +531,26 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * 20.1. Operation 3: CB_GETATTR - Get Attributes
+ */
+static void nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr, const void *data)
+{
+ const struct nfsd4_callback *cb = data;
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_cb_compound_hdr hdr = {
+ .ident = cb->cb_clp->cl_cb_ident,
+ .minorversion = cb->cb_clp->cl_minorversion,
+ };
+
+ encode_cb_compound4args(xdr, &hdr);
+ encode_cb_sequence4args(xdr, cb, &hdr);
+ encode_cb_getattr4args(xdr, &hdr, ncf);
+ encode_cb_nops(&hdr);
+}
+
+/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -548,6 +606,42 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
}
/*
+ * 20.1. Operation 3: CB_GETATTR - Get Attributes
+ */
+static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfsd4_callback *cb = data;
+ struct nfs4_cb_compound_hdr hdr;
+ int status;
+ u32 bitmap[3] = {0};
+ u32 attrlen;
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+
+ status = decode_cb_compound4res(xdr, &hdr);
+ if (unlikely(status))
+ return status;
+
+ status = decode_cb_sequence4res(xdr, cb);
+ if (unlikely(status || cb->cb_seq_status))
+ return status;
+
+ status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status);
+ if (status)
+ return status;
+ if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
+ return -NFSERR_BAD_XDR;
+ if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
+ return -NFSERR_BAD_XDR;
+ if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize)))
+ return -NFSERR_BAD_XDR;
+ status = decode_cb_fattr4(xdr, bitmap, ncf);
+ return status;
+}
+
+/*
* 20.2. Operation 4: CB_RECALL - Recall a Delegation
*/
static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
@@ -855,6 +949,7 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = {
PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any),
+ PROC(CB_GETATTR, COMPOUND, cb_getattr, cb_getattr),
};
static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
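
The CB_GETATTR reply decoder is deliberately defensive: it refuses any attribute payload larger than the two u64 attributes it actually requested before handing the stream to decode_cb_fattr4(). A condensed sketch of that guard, under the same error convention as the code above:

	if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
		return -NFSERR_BAD_XDR;
	if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
		return -NFSERR_BAD_XDR;
	/* only change (8 bytes) and size (8 bytes) were requested */
	if (attrlen > 2 * sizeof(u64))
		return -NFSERR_BAD_XDR;
	status = decode_cb_fattr4(xdr, bitmap, ncf);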
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index e8a80052cb1b..5e8096bc5eaa 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -515,11 +515,11 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp,
if (!list_empty(&ls->ls_layouts)) {
if (found)
nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
- lrp->lrs_present = 1;
+ lrp->lrs_present = true;
} else {
trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
nfs4_unhash_stid(&ls->ls_stid);
- lrp->lrs_present = 0;
+ lrp->lrs_present = false;
}
spin_unlock(&ls->ls_lock);
@@ -539,7 +539,7 @@ nfsd4_return_client_layouts(struct svc_rqst *rqstp,
struct nfs4_layout *lp, *t;
LIST_HEAD(reaplist);
- lrp->lrs_present = 0;
+ lrp->lrs_present = false;
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 4199ede0583c..6f2d4aa4970d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -322,8 +322,8 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = nfserr_exist;
break;
case NFS4_CREATE_EXCLUSIVE:
- if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
- d_inode(child)->i_atime.tv_sec == v_atime &&
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
d_inode(child)->i_size == 0) {
open->op_created = true;
break; /* subtle */
@@ -331,8 +331,8 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
status = nfserr_exist;
break;
case NFS4_CREATE_EXCLUSIVE4_1:
- if (d_inode(child)->i_mtime.tv_sec == v_mtime &&
- d_inode(child)->i_atime.tv_sec == v_atime &&
+ if (inode_get_mtime_sec(d_inode(child)) == v_mtime &&
+ inode_get_atime_sec(d_inode(child)) == v_atime &&
d_inode(child)->i_size == 0) {
open->op_created = true;
goto set_attr; /* subtle */
@@ -1329,7 +1329,8 @@ extern void nfs_sb_deactive(struct super_block *sb);
* setup a work entry in the ssc delayed unmount list.
*/
static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
- struct nfsd4_ssc_umount_item **nsui)
+ struct nfsd4_ssc_umount_item **nsui,
+ struct svc_rqst *rqstp)
{
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *work = NULL;
@@ -1351,7 +1352,7 @@ try_again:
spin_unlock(&nn->nfsd_ssc_lock);
/* allow 20secs for mount/unmount for now - revisit */
- if (kthread_should_stop() ||
+ if (svc_thread_should_stop(rqstp) ||
(schedule_timeout(20*HZ) == 0)) {
finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work);
@@ -1467,7 +1468,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
- status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui);
+ status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui, rqstp);
if (status)
goto out_free_devname;
if ((*nsui)->nsui_vfsmount)
@@ -1642,6 +1643,7 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy,
if (bytes_total == 0)
bytes_total = ULLONG_MAX;
do {
+ /* Only async copies can be stopped here */
if (kthread_should_stop())
break;
bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
@@ -1760,6 +1762,7 @@ static int nfsd4_do_async_copy(void *data)
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
__be32 nfserr;
+ trace_nfsd_copy_do_async(copy);
if (nfsd4_ssc_is_inter(copy)) {
struct file *filp;
@@ -1798,21 +1801,27 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_copy *async_copy = NULL;
+ copy->cp_clp = cstate->clp;
if (nfsd4_ssc_is_inter(copy)) {
+ trace_nfsd_copy_inter(copy);
if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) {
status = nfserr_notsupp;
goto out;
}
status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
- if (status)
+ if (status) {
+ trace_nfsd_copy_done(copy, status);
return nfserr_offload_denied;
+ }
} else {
+ trace_nfsd_copy_intra(copy);
status = nfsd4_setup_intra_ssc(rqstp, cstate, copy);
- if (status)
+ if (status) {
+ trace_nfsd_copy_done(copy, status);
return status;
+ }
}
- copy->cp_clp = cstate->clp;
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
sizeof(struct knfsd_fh));
if (nfsd4_copy_is_async(copy)) {
@@ -1847,6 +1856,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
copy->nf_dst->nf_file, true);
}
out:
+ trace_nfsd_copy_done(copy, status);
release_copy_files(copy);
return status;
out_err:
@@ -1929,8 +1939,8 @@ nfsd4_copy_notify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
- cn->cpn_sec = nn->nfsd4_lease;
- cn->cpn_nsec = 0;
+ cn->cpn_lease_time.tv_sec = nn->nfsd4_lease;
+ cn->cpn_lease_time.tv_nsec = 0;
status = nfserrno(-ENOMEM);
cps = nfs4_alloc_init_cpntf_state(nn, stid);
@@ -2347,10 +2357,10 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
mutex_unlock(&ls->ls_mutex);
if (new_size > i_size_read(inode)) {
- lcp->lc_size_chg = 1;
+ lcp->lc_size_chg = true;
lcp->lc_newsize = new_size;
} else {
- lcp->lc_size_chg = 0;
+ lcp->lc_size_chg = false;
}
nfserr = ops->proc_layoutcommit(inode, lcp);
@@ -3200,6 +3210,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCK] = {
.op_func = nfsd4_lock,
+ .op_release = nfsd4_lock_release,
.op_flags = OP_MODIFIES_SOMETHING |
OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCK",
@@ -3208,6 +3219,7 @@ static const struct nfsd4_operation nfsd4_ops[] = {
},
[OP_LOCKT] = {
.op_func = nfsd4_lockt,
+ .op_release = nfsd4_lockt_release,
.op_flags = OP_NONTRIVIAL_ERROR_ENCODE,
.op_name = "OP_LOCKT",
.op_rsize_bop = nfsd4_lock_rsize,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8534693eb6a4..65fd5510323a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -59,7 +59,7 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
-#define all_ones {{~0,~0},~0}
+#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
.si_generation = ~0,
.si_opaque = all_ones,
@@ -127,6 +127,7 @@ static void free_session(struct nfsd4_session *);
static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
+static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;
static struct workqueue_struct *laundry_wq;
@@ -297,7 +298,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
nbl = find_blocked_lock(lo, fh, nn);
if (!nbl) {
- nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
+ nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
INIT_LIST_HEAD(&nbl->nbl_list);
INIT_LIST_HEAD(&nbl->nbl_lru);
@@ -1159,6 +1160,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
struct nfs4_delegation *dp;
+ struct nfs4_stid *stid;
long n;
dprintk("NFSD alloc_init_deleg\n");
@@ -1167,9 +1169,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
goto out_dec;
if (delegation_blocked(&fp->fi_fhandle))
goto out_dec;
- dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
- if (dp == NULL)
+ stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
+ if (stid == NULL)
goto out_dec;
+ dp = delegstateid(stid);
/*
* delegation seqid's are never incremented. The 4.1 special
@@ -1187,6 +1190,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
dp->dl_recalled = false;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
&nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
+ nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
+ &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
+ dp->dl_cb_fattr.ncf_file_modified = false;
+ dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
get_nfs4_file(fp);
dp->dl_stid.sc_file = fp;
return dp;
@@ -2894,11 +2901,56 @@ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
spin_unlock(&nn->client_lock);
}
+static int
+nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
+{
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+
+ ncf->ncf_cb_status = task->tk_status;
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ rpc_delay(task, 2 * HZ);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static void
+nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
+{
+ struct nfs4_cb_fattr *ncf =
+ container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ nfs4_put_stid(&dp->dl_stid);
+ clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
+ wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
+}
+
static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
.done = nfsd4_cb_recall_any_done,
.release = nfsd4_cb_recall_any_release,
};
+static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
+ .done = nfsd4_cb_getattr_done,
+ .release = nfsd4_cb_getattr_release,
+};
+
+void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
+{
+ struct nfs4_delegation *dp =
+ container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
+
+ if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
+ return;
+ refcount_inc(&dp->dl_stid.sc_count);
+ nfsd4_run_cb(&ncf->ncf_getattr);
+}
+
static struct nfs4_client *create_client(struct xdr_netobj name,
struct svc_rqst *rqstp, nfs4_verifier *verf)
{
@@ -5634,13 +5686,15 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent = NULL;
int cb_up;
int status = 0;
+ struct kstat stat;
+ struct path path;
cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
- open->op_recall = 0;
+ open->op_recall = false;
switch (open->op_claim_type) {
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!cb_up)
- open->op_recall = 1;
+ open->op_recall = true;
break;
case NFS4_OPEN_CLAIM_NULL:
parent = currentfh;
@@ -5671,6 +5725,18 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
+ path.mnt = currentfh->fh_export->ex_path.mnt;
+ path.dentry = currentfh->fh_dentry;
+ if (vfs_getattr(&path, &stat,
+ (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ AT_STATX_SYNC_AS_STAT)) {
+ nfs4_put_stid(&dp->dl_stid);
+ destroy_delegation(dp);
+ goto out_no_deleg;
+ }
+ dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
+ dp->dl_cb_fattr.ncf_initial_cinfo =
+ nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
} else {
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
@@ -5682,7 +5748,7 @@ out_no_deleg:
if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
dprintk("NFSD: WARNING: refusing delegation reclaim\n");
- open->op_recall = 1;
+ open->op_recall = true;
}
/* 4.1 client asking for a delegation? */
@@ -7487,6 +7553,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
+ struct super_block *sb;
__be32 status = 0;
int lkflg;
int err;
@@ -7508,6 +7575,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_lock: permission denied!\n");
return status;
}
+ sb = cstate->current_fh.fh_dentry->d_sb;
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -7559,7 +7627,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fp = lock_stp->st_stid.sc_file;
switch (lock->lk_type) {
case NFS4_READW_LT:
- if (nfsd4_has_session(cstate))
+ if (nfsd4_has_session(cstate) ||
+ exportfs_lock_op_is_async(sb->s_export_op))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_READ_LT:
@@ -7571,7 +7640,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fl_type = F_RDLCK;
break;
case NFS4_WRITEW_LT:
- if (nfsd4_has_session(cstate))
+ if (nfsd4_has_session(cstate) ||
+ exportfs_lock_op_is_async(sb->s_export_op))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_WRITE_LT:
@@ -7599,7 +7669,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* for file locks), so don't attempt blocking lock notifications
* on those filesystems:
*/
- if (nf->nf_file->f_op->lock)
+ if (!exportfs_lock_op_is_async(sb->s_export_op))
fl_flags &= ~FL_SLEEP;
nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
@@ -7705,6 +7775,14 @@ out:
return status;
}
+void nfsd4_lock_release(union nfsd4_op_u *u)
+{
+ struct nfsd4_lock *lock = &u->lock;
+ struct nfsd4_lock_denied *deny = &lock->lk_denied;
+
+ kfree(deny->ld_owner.data);
+}
+
/*
* The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
* so we do a temporary open here just to get an open file to pass to
@@ -7810,6 +7888,14 @@ out:
return status;
}
+void nfsd4_lockt_release(union nfsd4_op_u *u)
+{
+ struct nfsd4_lockt *lockt = &u->lockt;
+ struct nfsd4_lock_denied *deny = &lockt->lt_denied;
+
+ kfree(deny->ld_owner.data);
+}
+
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -8403,6 +8489,8 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
* nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
* @rqstp: RPC transaction context
* @inode: file to be checked for a conflict
+ * @modified: set to true if the file was modified by the client
+ * @size: new file size if @modified is true
*
* This function is called when there is a conflict between a write
* delegation and a change/size GETATTR from another client. The server
@@ -8411,21 +8499,23 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
* delegation before replying to the GETATTR. See RFC 8881 section
* 18.7.4.
*
- * The current implementation does not support CB_GETATTR yet. However
- * this can avoid recalling the delegation could be added in follow up
- * work.
- *
* Returns 0 if there is no conflict; otherwise an nfs_stat
* code is returned.
*/
__be32
-nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
+nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
+ bool *modified, u64 *size)
{
- __be32 status;
struct file_lock_context *ctx;
- struct file_lock *fl;
struct nfs4_delegation *dp;
+ struct nfs4_cb_fattr *ncf;
+ struct file_lock *fl;
+ struct iattr attrs;
+ __be32 status;
+ might_sleep();
+
+ *modified = false;
ctx = locks_inode_context(inode);
if (!ctx)
return 0;
@@ -8452,10 +8542,34 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
break_lease:
spin_unlock(&ctx->flc_lock);
nfsd_stats_wdeleg_getattr_inc();
- status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
- if (status != nfserr_jukebox ||
- !nfsd_wait_for_delegreturn(rqstp, inode))
- return status;
+
+ dp = fl->fl_owner;
+ ncf = &dp->dl_cb_fattr;
+ nfs4_cb_getattr(&dp->dl_cb_fattr);
+ wait_on_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, TASK_INTERRUPTIBLE);
+ if (ncf->ncf_cb_status) {
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ return status;
+ }
+ if (!ncf->ncf_file_modified &&
+ (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
+ ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
+ ncf->ncf_file_modified = true;
+ if (ncf->ncf_file_modified) {
+ /*
+ * The server does not write the client's modified size into the
+ * file's metadata; it only refreshes the timestamps here and passes
+ * the new size back to the GETATTR path via @size.
+ */
+ attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
+ attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
+ setattr_copy(&nop_mnt_idmap, inode, &attrs);
+ mark_inode_dirty(inode);
+ ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
+ *size = ncf->ncf_cur_fsize;
+ *modified = true;
+ }
return 0;
}
break;
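
nfs4_cb_getattr() and nfsd4_cb_getattr_release() above implement a single-flight protocol around CB_GETATTR_BUSY: at most one callback per delegation is in flight, the conflicting GETATTR path sleeps on the bit, and the release side clears it and wakes waiters. A condensed sketch; the memory barrier is an editorial annotation of the usual wake_up_bit() rule, not something the patch adds:

	/* launcher: at most one CB_GETATTR in flight per delegation */
	if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
		return;
	refcount_inc(&dp->dl_stid.sc_count);	/* pin the stid for the cb */
	nfsd4_run_cb(&ncf->ncf_getattr);

	/* waiter: the conflicting GETATTR sleeps until the reply lands */
	wait_on_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY, TASK_INTERRUPTIBLE);

	/* release: drop the pin, then clear and wake */
	nfs4_put_stid(&dp->dl_stid);
	clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
	smp_mb__after_atomic();		/* editorial: required before wake_up_bit() */
	wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);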
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 92c7dde148a4..ec4ed6206df1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2530,66 +2530,62 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
return true;
}
-static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
- struct svc_export *exp)
+static __be32 nfsd4_encode_nfs_fh4(struct xdr_stream *xdr,
+ struct knfsd_fh *fh_handle)
{
- if (exp->ex_flags & NFSEXP_V4ROOT) {
- *p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
- *p++ = 0;
- } else
- p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
- return p;
+ return nfsd4_encode_opaque(xdr, fh_handle->fh_raw, fh_handle->fh_size);
}
+/* This is a frequently-encoded type; open-coded for speed */
static __be32 nfsd4_encode_nfstime4(struct xdr_stream *xdr,
- struct timespec64 *tv)
+ const struct timespec64 *tv)
{
__be32 *p;
p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p)
return nfserr_resource;
-
- p = xdr_encode_hyper(p, (s64)tv->tv_sec);
+ p = xdr_encode_hyper(p, tv->tv_sec);
*p = cpu_to_be32(tv->tv_nsec);
return nfs_ok;
}
-/*
- * ctime (in NFSv4, time_metadata) is not writeable, and the client
- * doesn't really care what resolution could theoretically be stored by
- * the filesystem.
- *
- * The client cares how close together changes can be while still
- * guaranteeing ctime changes. For most filesystems (which have
- * timestamps with nanosecond fields) that is limited by the resolution
- * of the time returned from current_time() (which I'm assuming to be
- * 1/HZ).
- */
-static __be32 *encode_time_delta(__be32 *p, struct inode *inode)
+static __be32 nfsd4_encode_specdata4(struct xdr_stream *xdr,
+ unsigned int major, unsigned int minor)
{
- struct timespec64 ts;
- u32 ns;
+ __be32 status;
- ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
- ts = ns_to_timespec64(ns);
+ status = nfsd4_encode_uint32_t(xdr, major);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_uint32_t(xdr, minor);
+}
- p = xdr_encode_hyper(p, ts.tv_sec);
- *p++ = cpu_to_be32(ts.tv_nsec);
+static __be32
+nfsd4_encode_change_info4(struct xdr_stream *xdr, const struct nfsd4_change_info *c)
+{
+ __be32 status;
- return p;
+ status = nfsd4_encode_bool(xdr, c->atomic);
+ if (status != nfs_ok)
+ return status;
+ status = nfsd4_encode_changeid4(xdr, c->before_change);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_changeid4(xdr, c->after_change);
}
-static __be32
-nfsd4_encode_change_info4(struct xdr_stream *xdr, struct nfsd4_change_info *c)
+static __be32 nfsd4_encode_netaddr4(struct xdr_stream *xdr,
+ const struct nfs42_netaddr *addr)
{
- if (xdr_stream_encode_bool(xdr, c->atomic) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u64(xdr, c->before_change) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u64(xdr, c->after_change) < 0)
- return nfserr_resource;
- return nfs_ok;
+ __be32 status;
+
+ /* na_r_netid */
+ status = nfsd4_encode_opaque(xdr, addr->netid, addr->netid_len);
+ if (status != nfs_ok)
+ return status;
+ /* na_r_addr */
+ return nfsd4_encode_opaque(xdr, addr->addr, addr->addr_len);
}
/* Encode as an array of strings the string given with components
@@ -2661,9 +2657,6 @@ static __be32 nfsd4_encode_components(struct xdr_stream *xdr, char sep,
return nfsd4_encode_components_esc(xdr, sep, components, 0, 0);
}
-/*
- * encode a location element of a fs_locations structure
- */
static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
struct nfsd4_fs_location *location)
{
@@ -2676,15 +2669,12 @@ static __be32 nfsd4_encode_fs_location4(struct xdr_stream *xdr,
status = nfsd4_encode_components(xdr, '/', location->path);
if (status)
return status;
- return 0;
+ return nfs_ok;
}
-/*
- * Encode a path in RFC3530 'pathname4' format
- */
-static __be32 nfsd4_encode_path(struct xdr_stream *xdr,
- const struct path *root,
- const struct path *path)
+static __be32 nfsd4_encode_pathname4(struct xdr_stream *xdr,
+ const struct path *root,
+ const struct path *path)
{
struct path cur = *path;
__be32 *p;
@@ -2752,89 +2742,59 @@ out_free:
return err;
}
-static __be32 nfsd4_encode_fsloc_fsroot(struct xdr_stream *xdr,
- struct svc_rqst *rqstp, const struct path *path)
+static __be32 nfsd4_encode_fs_locations4(struct xdr_stream *xdr,
+ struct svc_rqst *rqstp,
+ struct svc_export *exp)
{
+ struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
struct svc_export *exp_ps;
- __be32 res;
+ unsigned int i;
+ __be32 status;
+ /* fs_root */
exp_ps = rqst_find_fsidzero_export(rqstp);
if (IS_ERR(exp_ps))
return nfserrno(PTR_ERR(exp_ps));
- res = nfsd4_encode_path(xdr, &exp_ps->ex_path, path);
+ status = nfsd4_encode_pathname4(xdr, &exp_ps->ex_path, &exp->ex_path);
exp_put(exp_ps);
- return res;
-}
-
-/*
- * encode a fs_locations structure
- */
-static __be32 nfsd4_encode_fs_locations(struct xdr_stream *xdr,
- struct svc_rqst *rqstp, struct svc_export *exp)
-{
- __be32 status;
- int i;
- __be32 *p;
- struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
-
- status = nfsd4_encode_fsloc_fsroot(xdr, rqstp, &exp->ex_path);
- if (status)
+ if (status != nfs_ok)
return status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+
+ /* locations<> */
+ if (xdr_stream_encode_u32(xdr, fslocs->locations_count) != XDR_UNIT)
return nfserr_resource;
- *p++ = cpu_to_be32(fslocs->locations_count);
- for (i=0; i<fslocs->locations_count; i++) {
+ for (i = 0; i < fslocs->locations_count; i++) {
status = nfsd4_encode_fs_location4(xdr, &fslocs->locations[i]);
- if (status)
+ if (status != nfs_ok)
return status;
}
- return 0;
-}
-static u32 nfs4_file_type(umode_t mode)
-{
- switch (mode & S_IFMT) {
- case S_IFIFO: return NF4FIFO;
- case S_IFCHR: return NF4CHR;
- case S_IFDIR: return NF4DIR;
- case S_IFBLK: return NF4BLK;
- case S_IFLNK: return NF4LNK;
- case S_IFREG: return NF4REG;
- case S_IFSOCK: return NF4SOCK;
- default: return NF4BAD;
- }
+ return nfs_ok;
}
-static inline __be32
-nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
- struct nfs4_ace *ace)
+static __be32 nfsd4_encode_nfsace4(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ struct nfs4_ace *ace)
{
+ __be32 status;
+
+ /* type */
+ status = nfsd4_encode_acetype4(xdr, ace->type);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* flag */
+ status = nfsd4_encode_aceflag4(xdr, ace->flag);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* access mask */
+ status = nfsd4_encode_acemask4(xdr, ace->access_mask & NFS4_ACE_MASK_ALL);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* who */
if (ace->whotype != NFS4_ACL_WHO_NAMED)
return nfs4_acl_write_who(xdr, ace->whotype);
- else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
+ if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP)
return nfsd4_encode_group(xdr, rqstp, ace->who_gid);
- else
- return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
-}
-
-static inline __be32
-nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
-{
- __be32 *p;
- unsigned long i = hweight_long(layout_types);
-
- p = xdr_reserve_space(xdr, 4 + 4 * i);
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(i);
-
- for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
- if (layout_types & (1 << i))
- *p++ = cpu_to_be32(i);
-
- return 0;
+ return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
}
#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
@@ -2906,12 +2866,12 @@ static int nfsd4_get_mounted_on_ino(struct svc_export *exp, u64 *pino)
}
static __be32
-nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
+nfsd4_encode_bitmap4(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
{
__be32 *p;
if (bmval2) {
- p = xdr_reserve_space(xdr, 16);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 4);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(3);
@@ -2919,94 +2879,687 @@ nfsd4_encode_bitmap(struct xdr_stream *xdr, u32 bmval0, u32 bmval1, u32 bmval2)
*p++ = cpu_to_be32(bmval1);
*p++ = cpu_to_be32(bmval2);
} else if (bmval1) {
- p = xdr_reserve_space(xdr, 12);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 3);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(2);
*p++ = cpu_to_be32(bmval0);
*p++ = cpu_to_be32(bmval1);
} else {
- p = xdr_reserve_space(xdr, 8);
+ p = xdr_reserve_space(xdr, XDR_UNIT * 2);
if (!p)
goto out_resource;
*p++ = cpu_to_be32(1);
*p++ = cpu_to_be32(bmval0);
}
- return 0;
+ return nfs_ok;
out_resource:
return nfserr_resource;
}
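+/*
+ * Per-request state needed by the individual fattr4 attribute
+ * encoders below, gathered once so that every encoder can share
+ * the same signature.
+ */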
+struct nfsd4_fattr_args {
+ struct svc_rqst *rqstp;
+ struct svc_fh *fhp;
+ struct svc_export *exp;
+ struct dentry *dentry;
+ struct kstat stat;
+ struct kstatfs statfs;
+ struct nfs4_acl *acl;
+ u64 size;
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ void *context;
+ int contextlen;
+#endif
+ u32 rdattr_err;
+ bool contextsupport;
+ bool ignore_crossmnt;
+};
+
+typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args);
+
+static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4__true(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_bool(xdr, true);
+}
+
+static __be32 nfsd4_encode_fattr4__false(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_bool(xdr, false);
+}
+
+static __be32 nfsd4_encode_fattr4_supported_attrs(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd4_compoundres *resp = args->rqstp->rq_resp;
+ u32 minorversion = resp->cstate.minorversion;
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
+ if (!IS_POSIXACL(d_inode(args->dentry)))
+ supp[0] &= ~FATTR4_WORD0_ACL;
+ if (!args->contextsupport)
+ supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+
+ return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+}
+
+static __be32 nfsd4_encode_fattr4_type(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!p)
+ return nfserr_resource;
+
+ switch (args->stat.mode & S_IFMT) {
+ case S_IFIFO:
+ *p = cpu_to_be32(NF4FIFO);
+ break;
+ case S_IFCHR:
+ *p = cpu_to_be32(NF4CHR);
+ break;
+ case S_IFDIR:
+ *p = cpu_to_be32(NF4DIR);
+ break;
+ case S_IFBLK:
+ *p = cpu_to_be32(NF4BLK);
+ break;
+ case S_IFLNK:
+ *p = cpu_to_be32(NF4LNK);
+ break;
+ case S_IFREG:
+ *p = cpu_to_be32(NF4REG);
+ break;
+ case S_IFSOCK:
+ *p = cpu_to_be32(NF4SOCK);
+ break;
+ default:
+ return nfserr_serverfault;
+ }
+
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_fh_expire_type(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u32 mask;
+
+ mask = NFS4_FH_PERSISTENT;
+ if (!(args->exp->ex_flags & NFSEXP_NOSUBTREECHECK))
+ mask |= NFS4_FH_VOL_RENAME;
+ return nfsd4_encode_uint32_t(xdr, mask);
+}
+
+static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ const struct svc_export *exp = args->exp;
+ u64 c;
+
+ if (unlikely(exp->ex_flags & NFSEXP_V4ROOT)) {
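+ /*
+ * Pseudo-fs root: use the export cache flush time as the
+ * change attribute.
+ */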
+ u32 flush_time = convert_to_wallclock(exp->cd->flush_time);
+
+ if (xdr_stream_encode_u32(xdr, flush_time) != XDR_UNIT)
+ return nfserr_resource;
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
+ c = nfsd4_change_attribute(&args->stat, d_inode(args->dentry));
+ return nfsd4_encode_changeid4(xdr, c);
+}
+
+static __be32 nfsd4_encode_fattr4_size(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->size);
+}
+
+static __be32 nfsd4_encode_fattr4_fsid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT * 2 + XDR_UNIT * 2);
+ if (!p)
+ return nfserr_resource;
+
+ if (unlikely(args->exp->ex_fslocs.migrated)) {
+ p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
+ xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
+ return nfs_ok;
+ }
+ switch (fsid_source(args->fhp)) {
+ case FSIDSOURCE_FSID:
+ p = xdr_encode_hyper(p, (u64)args->exp->ex_fsid);
+ xdr_encode_hyper(p, (u64)0);
+ break;
+ case FSIDSOURCE_DEV:
+ *p++ = xdr_zero;
+ *p++ = cpu_to_be32(MAJOR(args->stat.dev));
+ *p++ = xdr_zero;
+ *p = cpu_to_be32(MINOR(args->stat.dev));
+ break;
+ case FSIDSOURCE_UUID:
+ xdr_encode_opaque_fixed(p, args->exp->ex_uuid, EX_UUID_LEN);
+ break;
+ }
+
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_lease_time(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(args->rqstp), nfsd_net_id);
+
+ return nfsd4_encode_nfs_lease4(xdr, nn->nfsd4_lease);
+}
+
+static __be32 nfsd4_encode_fattr4_rdattr_error(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->rdattr_err);
+}
+
+static __be32 nfsd4_encode_fattr4_aclsupport(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u32 mask;
+
+ mask = 0;
+ if (IS_POSIXACL(d_inode(args->dentry)))
+ mask = ACL4_SUPPORT_ALLOW_ACL | ACL4_SUPPORT_DENY_ACL;
+ return nfsd4_encode_uint32_t(xdr, mask);
+}
+
+static __be32 nfsd4_encode_fattr4_acl(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfs4_acl *acl = args->acl;
+ struct nfs4_ace *ace;
+ __be32 status;
+
+ /* nfsace4<> */
+ if (!acl) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ } else {
+ if (xdr_stream_encode_u32(xdr, acl->naces) != XDR_UNIT)
+ return nfserr_resource;
+ for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
+ status = nfsd4_encode_nfsace4(xdr, args->rqstp, ace);
+ if (status != nfs_ok)
+ return status;
+ }
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_filehandle(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfs_fh4(xdr, &args->fhp->fh_handle);
+}
+
+static __be32 nfsd4_encode_fattr4_fileid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->stat.ino);
+}
+
+static __be32 nfsd4_encode_fattr4_files_avail(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_ffree);
+}
+
+static __be32 nfsd4_encode_fattr4_files_free(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_ffree);
+}
+
+static __be32 nfsd4_encode_fattr4_files_total(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, args->statfs.f_files);
+}
+
+static __be32 nfsd4_encode_fattr4_fs_locations(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_fs_locations4(xdr, args->rqstp, args->exp);
+}
+
+static __be32 nfsd4_encode_fattr4_maxfilesize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct super_block *sb = args->exp->ex_path.mnt->mnt_sb;
+
+ return nfsd4_encode_uint64_t(xdr, sb->s_maxbytes);
+}
+
+static __be32 nfsd4_encode_fattr4_maxlink(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, 255);
+}
+
+static __be32 nfsd4_encode_fattr4_maxname(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->statfs.f_namelen);
+}
+
+static __be32 nfsd4_encode_fattr4_maxread(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, svc_max_payload(args->rqstp));
+}
+
+static __be32 nfsd4_encode_fattr4_maxwrite(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, svc_max_payload(args->rqstp));
+}
+
+static __be32 nfsd4_encode_fattr4_mode(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_mode4(xdr, args->stat.mode & S_IALLUGO);
+}
+
+static __be32 nfsd4_encode_fattr4_numlinks(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->stat.nlink);
+}
+
+static __be32 nfsd4_encode_fattr4_owner(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_user(xdr, args->rqstp, args->stat.uid);
+}
+
+static __be32 nfsd4_encode_fattr4_owner_group(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_group(xdr, args->rqstp, args->stat.gid);
+}
+
+static __be32 nfsd4_encode_fattr4_rawdev(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_specdata4(xdr, MAJOR(args->stat.rdev),
+ MINOR(args->stat.rdev));
+}
+
+static __be32 nfsd4_encode_fattr4_space_avail(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 avail = (u64)args->statfs.f_bavail * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, avail);
+}
+
+static __be32 nfsd4_encode_fattr4_space_free(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 free = (u64)args->statfs.f_bfree * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, free);
+}
+
+static __be32 nfsd4_encode_fattr4_space_total(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 total = (u64)args->statfs.f_blocks * (u64)args->statfs.f_bsize;
+
+ return nfsd4_encode_uint64_t(xdr, total);
+}
+
+static __be32 nfsd4_encode_fattr4_space_used(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint64_t(xdr, (u64)args->stat.blocks << 9);
+}
+
+static __be32 nfsd4_encode_fattr4_time_access(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.atime);
+}
+
+static __be32 nfsd4_encode_fattr4_time_create(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.btime);
+}
+
+/*
+ * ctime (in NFSv4, time_metadata) is not writeable, and the client
+ * doesn't really care what resolution could theoretically be stored by
+ * the filesystem.
+ *
+ * The client cares how close together changes can be while still
+ * guaranteeing ctime changes. For most filesystems (which have
+ * timestamps with nanosecond fields) that is limited by the resolution
+ * of the time returned from current_time() (which I'm assuming to be
+ * 1/HZ).
+ */
+static __be32 nfsd4_encode_fattr4_time_delta(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ const struct inode *inode = d_inode(args->dentry);
+ u32 ns = max_t(u32, NSEC_PER_SEC/HZ, inode->i_sb->s_time_gran);
+ struct timespec64 ts = ns_to_timespec64(ns);
+
+ return nfsd4_encode_nfstime4(xdr, &ts);
+}
+
+static __be32 nfsd4_encode_fattr4_time_metadata(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.ctime);
+}
+
+static __be32 nfsd4_encode_fattr4_time_modify(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_nfstime4(xdr, &args->stat.mtime);
+}
+
+static __be32 nfsd4_encode_fattr4_mounted_on_fileid(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ u64 ino;
+ int err;
+
+ if (!args->ignore_crossmnt &&
+ args->dentry == args->exp->ex_path.mnt->mnt_root) {
+ err = nfsd4_get_mounted_on_ino(args->exp, &ino);
+ if (err)
+ return nfserrno(err);
+ } else
+ ino = args->stat.ino;
+
+ return nfsd4_encode_uint64_t(xdr, ino);
+}
+
+#ifdef CONFIG_NFSD_PNFS
+
+static __be32 nfsd4_encode_fattr4_fs_layout_types(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ unsigned long mask = args->exp->ex_layout_types;
+ int i;
+
+ /* Hamming weight of @mask is the number of layout types to return */
+ if (xdr_stream_encode_u32(xdr, hweight_long(mask)) != XDR_UNIT)
+ return nfserr_resource;
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (mask & BIT(i)) {
+ /* layouttype4 */
+ if (xdr_stream_encode_u32(xdr, i) != XDR_UNIT)
+ return nfserr_resource;
+ }
+ return nfs_ok;
+}
+
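+/*
+ * Encoded exactly like fs_layout_types above; both attributes
+ * report the export's ex_layout_types bitmap.
+ */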
+static __be32 nfsd4_encode_fattr4_layout_types(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ unsigned long mask = args->exp->ex_layout_types;
+ int i;
+
+ /* Hamming weight of @mask is the number of layout types to return */
+ if (xdr_stream_encode_u32(xdr, hweight_long(mask)) != XDR_UNIT)
+ return nfserr_resource;
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (mask & BIT(i)) {
+ /* layouttype4 */
+ if (xdr_stream_encode_u32(xdr, i) != XDR_UNIT)
+ return nfserr_resource;
+ }
+ return nfs_ok;
+}
+
+static __be32 nfsd4_encode_fattr4_layout_blksize(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_uint32_t(xdr, args->stat.blksize);
+}
+
+#endif
+
+static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ struct nfsd4_compoundres *resp = args->rqstp->rq_resp;
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[resp->cstate.minorversion], sizeof(supp));
+ supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+ supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+ supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+ return nfsd4_encode_bitmap4(xdr, supp[0], supp[1], supp[2]);
+}
+
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ return nfsd4_encode_security_label(xdr, args->rqstp,
+ args->context, args->contextlen);
+}
+#endif
+
+static __be32 nfsd4_encode_fattr4_xattr_support(struct xdr_stream *xdr,
+ const struct nfsd4_fattr_args *args)
+{
+ int err = xattr_supports_user_prefix(d_inode(args->dentry));
+
+ return nfsd4_encode_bool(xdr, err == 0);
+}
+
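+/*
+ * Attribute-value encoders, indexed by attribute bit number.
+ * The _noop encoder marks attributes for which this server
+ * returns no value; _true and _false encode fixed booleans.
+ */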
+static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = {
+ [FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs,
+ [FATTR4_TYPE] = nfsd4_encode_fattr4_type,
+ [FATTR4_FH_EXPIRE_TYPE] = nfsd4_encode_fattr4_fh_expire_type,
+ [FATTR4_CHANGE] = nfsd4_encode_fattr4_change,
+ [FATTR4_SIZE] = nfsd4_encode_fattr4_size,
+ [FATTR4_LINK_SUPPORT] = nfsd4_encode_fattr4__true,
+ [FATTR4_SYMLINK_SUPPORT] = nfsd4_encode_fattr4__true,
+ [FATTR4_NAMED_ATTR] = nfsd4_encode_fattr4__false,
+ [FATTR4_FSID] = nfsd4_encode_fattr4_fsid,
+ [FATTR4_UNIQUE_HANDLES] = nfsd4_encode_fattr4__true,
+ [FATTR4_LEASE_TIME] = nfsd4_encode_fattr4_lease_time,
+ [FATTR4_RDATTR_ERROR] = nfsd4_encode_fattr4_rdattr_error,
+ [FATTR4_ACL] = nfsd4_encode_fattr4_acl,
+ [FATTR4_ACLSUPPORT] = nfsd4_encode_fattr4_aclsupport,
+ [FATTR4_ARCHIVE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CANSETTIME] = nfsd4_encode_fattr4__true,
+ [FATTR4_CASE_INSENSITIVE] = nfsd4_encode_fattr4__false,
+ [FATTR4_CASE_PRESERVING] = nfsd4_encode_fattr4__true,
+ [FATTR4_CHOWN_RESTRICTED] = nfsd4_encode_fattr4__true,
+ [FATTR4_FILEHANDLE] = nfsd4_encode_fattr4_filehandle,
+ [FATTR4_FILEID] = nfsd4_encode_fattr4_fileid,
+ [FATTR4_FILES_AVAIL] = nfsd4_encode_fattr4_files_avail,
+ [FATTR4_FILES_FREE] = nfsd4_encode_fattr4_files_free,
+ [FATTR4_FILES_TOTAL] = nfsd4_encode_fattr4_files_total,
+ [FATTR4_FS_LOCATIONS] = nfsd4_encode_fattr4_fs_locations,
+ [FATTR4_HIDDEN] = nfsd4_encode_fattr4__noop,
+ [FATTR4_HOMOGENEOUS] = nfsd4_encode_fattr4__true,
+ [FATTR4_MAXFILESIZE] = nfsd4_encode_fattr4_maxfilesize,
+ [FATTR4_MAXLINK] = nfsd4_encode_fattr4_maxlink,
+ [FATTR4_MAXNAME] = nfsd4_encode_fattr4_maxname,
+ [FATTR4_MAXREAD] = nfsd4_encode_fattr4_maxread,
+ [FATTR4_MAXWRITE] = nfsd4_encode_fattr4_maxwrite,
+ [FATTR4_MIMETYPE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MODE] = nfsd4_encode_fattr4_mode,
+ [FATTR4_NO_TRUNC] = nfsd4_encode_fattr4__true,
+ [FATTR4_NUMLINKS] = nfsd4_encode_fattr4_numlinks,
+ [FATTR4_OWNER] = nfsd4_encode_fattr4_owner,
+ [FATTR4_OWNER_GROUP] = nfsd4_encode_fattr4_owner_group,
+ [FATTR4_QUOTA_AVAIL_HARD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_QUOTA_AVAIL_SOFT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_QUOTA_USED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RAWDEV] = nfsd4_encode_fattr4_rawdev,
+ [FATTR4_SPACE_AVAIL] = nfsd4_encode_fattr4_space_avail,
+ [FATTR4_SPACE_FREE] = nfsd4_encode_fattr4_space_free,
+ [FATTR4_SPACE_TOTAL] = nfsd4_encode_fattr4_space_total,
+ [FATTR4_SPACE_USED] = nfsd4_encode_fattr4_space_used,
+ [FATTR4_SYSTEM] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_ACCESS] = nfsd4_encode_fattr4_time_access,
+ [FATTR4_TIME_ACCESS_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_BACKUP] = nfsd4_encode_fattr4__noop,
+ [FATTR4_TIME_CREATE] = nfsd4_encode_fattr4_time_create,
+ [FATTR4_TIME_DELTA] = nfsd4_encode_fattr4_time_delta,
+ [FATTR4_TIME_METADATA] = nfsd4_encode_fattr4_time_metadata,
+ [FATTR4_TIME_MODIFY] = nfsd4_encode_fattr4_time_modify,
+ [FATTR4_TIME_MODIFY_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MOUNTED_ON_FILEID] = nfsd4_encode_fattr4_mounted_on_fileid,
+ [FATTR4_DIR_NOTIF_DELAY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_DIRENT_NOTIF_DELAY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_DACL] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SACL] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CHANGE_POLICY] = nfsd4_encode_fattr4__noop,
+ [FATTR4_FS_STATUS] = nfsd4_encode_fattr4__noop,
+
+#ifdef CONFIG_NFSD_PNFS
+ [FATTR4_FS_LAYOUT_TYPES] = nfsd4_encode_fattr4_fs_layout_types,
+ [FATTR4_LAYOUT_HINT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_TYPES] = nfsd4_encode_fattr4_layout_types,
+ [FATTR4_LAYOUT_BLKSIZE] = nfsd4_encode_fattr4_layout_blksize,
+ [FATTR4_LAYOUT_ALIGNMENT] = nfsd4_encode_fattr4__noop,
+#else
+ [FATTR4_FS_LAYOUT_TYPES] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_HINT] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_TYPES] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_BLKSIZE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_LAYOUT_ALIGNMENT] = nfsd4_encode_fattr4__noop,
+#endif
+
+ [FATTR4_FS_LOCATIONS_INFO] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MDSTHRESHOLD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_GET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTEVT_GET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTEVT_SET] = nfsd4_encode_fattr4__noop,
+ [FATTR4_RETENTION_HOLD] = nfsd4_encode_fattr4__noop,
+ [FATTR4_MODE_SET_MASKED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SUPPATTR_EXCLCREAT] = nfsd4_encode_fattr4_suppattr_exclcreat,
+ [FATTR4_FS_CHARSET_CAP] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CLONE_BLKSIZE] = nfsd4_encode_fattr4__noop,
+ [FATTR4_SPACE_FREED] = nfsd4_encode_fattr4__noop,
+ [FATTR4_CHANGE_ATTR_TYPE] = nfsd4_encode_fattr4__noop,
+
+#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ [FATTR4_SEC_LABEL] = nfsd4_encode_fattr4_sec_label,
+#else
+ [FATTR4_SEC_LABEL] = nfsd4_encode_fattr4__noop,
+#endif
+
+ [FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop,
+ [FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support,
+};
+
/*
* Note: @fhp can be NULL; in this case, we might have to compose the filehandle
* ourselves.
*/
static __be32
-nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
- struct svc_export *exp,
- struct dentry *dentry, u32 *bmval,
- struct svc_rqst *rqstp, int ignore_crossmnt)
+nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
+ struct svc_fh *fhp, struct svc_export *exp,
+ struct dentry *dentry, const u32 *bmval,
+ int ignore_crossmnt)
{
- u32 bmval0 = bmval[0];
- u32 bmval1 = bmval[1];
- u32 bmval2 = bmval[2];
- struct kstat stat;
+ struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
- struct kstatfs statfs;
- __be32 *p, *attrlen_p;
int starting_len = xdr->buf->len;
+ __be32 *attrlen_p, status;
int attrlen_offset;
- u32 dummy;
- u64 dummy64;
- u32 rdattr_err = 0;
- __be32 status;
int err;
- struct nfs4_acl *acl = NULL;
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- void *context = NULL;
- int contextlen;
-#endif
- bool contextsupport = false;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
struct path path = {
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
- struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ union {
+ u32 attrmask[3];
+ unsigned long mask[2];
+ } u;
+ bool file_modified = false;
+ unsigned long bit;
+ u64 size = 0;
+
+ WARN_ON_ONCE(bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1);
+ WARN_ON_ONCE(!nfsd_attrs_supported(minorversion, bmval));
+
+ args.rqstp = rqstp;
+ args.exp = exp;
+ args.dentry = dentry;
+ args.ignore_crossmnt = (ignore_crossmnt != 0);
- BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
- BUG_ON(!nfsd_attrs_supported(minorversion, bmval));
+ /*
+ * Make a local copy of the attribute bitmap that can be modified.
+ * The union overlays the three u32 bitmask words on an array of
+ * unsigned longs so that for_each_set_bit() can walk them below.
+ */
+ memset(&u, 0, sizeof(u));
+ u.attrmask[0] = bmval[0];
+ u.attrmask[1] = bmval[1];
+ u.attrmask[2] = bmval[2];
+ args.rdattr_err = 0;
if (exp->ex_fslocs.migrated) {
- status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
+ status = fattr_handle_absent_fs(&u.attrmask[0], &u.attrmask[1],
+ &u.attrmask[2], &args.rdattr_err);
if (status)
goto out;
}
- if (bmval0 & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
- status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry));
+ args.size = 0;
+ if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
+ &file_modified, &size);
if (status)
goto out;
}
- err = vfs_getattr(&path, &stat,
+ err = vfs_getattr(&path, &args.stat,
STATX_BASIC_STATS | STATX_BTIME | STATX_CHANGE_COOKIE,
AT_STATX_SYNC_AS_STAT);
if (err)
goto out_nfserr;
- if (!(stat.result_mask & STATX_BTIME))
+ args.size = file_modified ? size : args.stat.size;
+
+ if (!(args.stat.result_mask & STATX_BTIME))
/* underlying FS does not offer btime so we can't share it */
- bmval1 &= ~FATTR4_WORD1_TIME_CREATE;
- if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+ u.attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ if ((u.attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
- (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ (u.attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
- err = vfs_statfs(&path, &statfs);
+ err = vfs_statfs(&path, &args.statfs);
if (err)
goto out_nfserr;
}
- if ((bmval0 & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) && !fhp) {
+ if ((u.attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+ !fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
if (!tempfh)
@@ -3015,12 +3568,15 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
status = fh_compose(tempfh, exp, dentry, NULL);
if (status)
goto out;
- fhp = tempfh;
- }
- if (bmval0 & FATTR4_WORD0_ACL) {
- err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
+ args.fhp = tempfh;
+ } else
+ args.fhp = fhp;
+
+ args.acl = NULL;
+ if (u.attrmask[0] & FATTR4_WORD0_ACL) {
+ err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
- bmval0 &= ~FATTR4_WORD0_ACL;
+ u.attrmask[0] &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
@@ -3028,452 +3584,53 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
goto out_nfserr;
}
+ args.contextsupport = false;
+
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
- bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ args.context = NULL;
+ if ((u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+ u.attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
- &context, &contextlen);
+ &args.context, &args.contextlen);
else
err = -EOPNOTSUPP;
- contextsupport = (err == 0);
- if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
+ args.contextsupport = (err == 0);
+ if (u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
- bmval2 &= ~FATTR4_WORD2_SECURITY_LABEL;
+ u.attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
}
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
- status = nfsd4_encode_bitmap(xdr, bmval0, bmval1, bmval2);
+ /* attrmask */
+ status = nfsd4_encode_bitmap4(xdr, u.attrmask[0],
+ u.attrmask[1], u.attrmask[2]);
if (status)
goto out;
+ /* attr_vals */
attrlen_offset = xdr->buf->len;
attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
if (!attrlen_p)
goto out_resource;
-
- if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
- u32 supp[3];
-
- memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
-
- if (!IS_POSIXACL(dentry->d_inode))
- supp[0] &= ~FATTR4_WORD0_ACL;
- if (!contextsupport)
- supp[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
- if (!supp[2]) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(supp[0]);
- *p++ = cpu_to_be32(supp[1]);
- } else {
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(supp[0]);
- *p++ = cpu_to_be32(supp[1]);
- *p++ = cpu_to_be32(supp[2]);
- }
- }
- if (bmval0 & FATTR4_WORD0_TYPE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- dummy = nfs4_file_type(stat.mode);
- if (dummy == NF4BAD) {
- status = nfserr_serverfault;
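+ /* Encode the value of each attribute that is still set in the bitmask */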
+ for_each_set_bit(bit, (const unsigned long *)&u.mask,
+ ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
+ status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
+ if (status != nfs_ok)
goto out;
- }
- *p++ = cpu_to_be32(dummy);
- }
- if (bmval0 & FATTR4_WORD0_FH_EXPIRE_TYPE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- if (exp->ex_flags & NFSEXP_NOSUBTREECHECK)
- *p++ = cpu_to_be32(NFS4_FH_PERSISTENT);
- else
- *p++ = cpu_to_be32(NFS4_FH_PERSISTENT|
- NFS4_FH_VOL_RENAME);
}
- if (bmval0 & FATTR4_WORD0_CHANGE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = encode_change(p, &stat, d_inode(dentry), exp);
- }
- if (bmval0 & FATTR4_WORD0_SIZE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, stat.size);
- }
- if (bmval0 & FATTR4_WORD0_LINK_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_SYMLINK_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_NAMED_ATTR) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_FSID) {
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- goto out_resource;
- if (exp->ex_fslocs.migrated) {
- p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MAJOR);
- p = xdr_encode_hyper(p, NFS4_REFERRAL_FSID_MINOR);
- } else switch(fsid_source(fhp)) {
- case FSIDSOURCE_FSID:
- p = xdr_encode_hyper(p, (u64)exp->ex_fsid);
- p = xdr_encode_hyper(p, (u64)0);
- break;
- case FSIDSOURCE_DEV:
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(MAJOR(stat.dev));
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(MINOR(stat.dev));
- break;
- case FSIDSOURCE_UUID:
- p = xdr_encode_opaque_fixed(p, exp->ex_uuid,
- EX_UUID_LEN);
- break;
- }
- }
- if (bmval0 & FATTR4_WORD0_UNIQUE_HANDLES) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_LEASE_TIME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(nn->nfsd4_lease);
- }
- if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(rdattr_err);
- }
- if (bmval0 & FATTR4_WORD0_ACL) {
- struct nfs4_ace *ace;
-
- if (acl == NULL) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
-
- *p++ = cpu_to_be32(0);
- goto out_acl;
- }
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(acl->naces);
-
- for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) {
- p = xdr_reserve_space(xdr, 4*3);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(ace->type);
- *p++ = cpu_to_be32(ace->flag);
- *p++ = cpu_to_be32(ace->access_mask &
- NFS4_ACE_MASK_ALL);
- status = nfsd4_encode_aclname(xdr, rqstp, ace);
- if (status)
- goto out;
- }
- }
-out_acl:
- if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(IS_POSIXACL(dentry->d_inode) ?
- ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
- }
- if (bmval0 & FATTR4_WORD0_CANSETTIME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(0);
- }
- if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_CHOWN_RESTRICTED) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_FILEHANDLE) {
- p = xdr_reserve_space(xdr, fhp->fh_handle.fh_size + 4);
- if (!p)
- goto out_resource;
- p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw,
- fhp->fh_handle.fh_size);
- }
- if (bmval0 & FATTR4_WORD0_FILEID) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, stat.ino);
- }
- if (bmval0 & FATTR4_WORD0_FILES_AVAIL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
- }
- if (bmval0 & FATTR4_WORD0_FILES_FREE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_ffree);
- }
- if (bmval0 & FATTR4_WORD0_FILES_TOTAL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) statfs.f_files);
- }
- if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
- status = nfsd4_encode_fs_locations(xdr, rqstp, exp);
- if (status)
- goto out;
- }
- if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval0 & FATTR4_WORD0_MAXFILESIZE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, exp->ex_path.mnt->mnt_sb->s_maxbytes);
- }
- if (bmval0 & FATTR4_WORD0_MAXLINK) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(255);
- }
- if (bmval0 & FATTR4_WORD0_MAXNAME) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(statfs.f_namelen);
- }
- if (bmval0 & FATTR4_WORD0_MAXREAD) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
- }
- if (bmval0 & FATTR4_WORD0_MAXWRITE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- p = xdr_encode_hyper(p, (u64) svc_max_payload(rqstp));
- }
- if (bmval1 & FATTR4_WORD1_MODE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.mode & S_IALLUGO);
- }
- if (bmval1 & FATTR4_WORD1_NO_TRUNC) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(1);
- }
- if (bmval1 & FATTR4_WORD1_NUMLINKS) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.nlink);
- }
- if (bmval1 & FATTR4_WORD1_OWNER) {
- status = nfsd4_encode_user(xdr, rqstp, stat.uid);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
- status = nfsd4_encode_group(xdr, rqstp, stat.gid);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_RAWDEV) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32((u32) MAJOR(stat.rdev));
- *p++ = cpu_to_be32((u32) MINOR(stat.rdev));
- }
- if (bmval1 & FATTR4_WORD1_SPACE_AVAIL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_bavail * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_FREE) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_bfree * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_TOTAL) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)statfs.f_blocks * (u64)statfs.f_bsize;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_SPACE_USED) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- dummy64 = (u64)stat.blocks << 9;
- p = xdr_encode_hyper(p, dummy64);
- }
- if (bmval1 & FATTR4_WORD1_TIME_ACCESS) {
- status = nfsd4_encode_nfstime4(xdr, &stat.atime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_TIME_CREATE) {
- status = nfsd4_encode_nfstime4(xdr, &stat.btime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_TIME_DELTA) {
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- goto out_resource;
- p = encode_time_delta(p, d_inode(dentry));
- }
- if (bmval1 & FATTR4_WORD1_TIME_METADATA) {
- status = nfsd4_encode_nfstime4(xdr, &stat.ctime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
- status = nfsd4_encode_nfstime4(xdr, &stat.mtime);
- if (status)
- goto out;
- }
- if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) {
- u64 ino = stat.ino;
-
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- goto out_resource;
- /*
- * Get ino of mountpoint in parent filesystem, if not ignoring
- * crossmount and this is the root of a cross-mounted
- * filesystem.
- */
- if (ignore_crossmnt == 0 &&
- dentry == exp->ex_path.mnt->mnt_root) {
- err = nfsd4_get_mounted_on_ino(exp, &ino);
- if (err)
- goto out_nfserr;
- }
- p = xdr_encode_hyper(p, ino);
- }
-#ifdef CONFIG_NFSD_PNFS
- if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
- if (status)
- goto out;
- }
-
- if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
- if (status)
- goto out;
- }
-
- if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- *p++ = cpu_to_be32(stat.blksize);
- }
-#endif /* CONFIG_NFSD_PNFS */
- if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
- u32 supp[3];
-
- memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
- supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
- supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
- supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
-
- status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
- if (status)
- goto out;
- }
-
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) {
- status = nfsd4_encode_security_label(xdr, rqstp, context,
- contextlen);
- if (status)
- goto out;
- }
-#endif
-
- if (bmval2 & FATTR4_WORD2_XATTR_SUPPORT) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- goto out_resource;
- err = xattr_supports_user_prefix(d_inode(dentry));
- *p++ = cpu_to_be32(err == 0);
- }
-
*attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT);
status = nfs_ok;
out:
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (context)
- security_release_secctx(context, contextlen);
+ if (args.context)
+ security_release_secctx(args.context, args.contextlen);
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
- kfree(acl);
+ kfree(args.acl);
if (tempfh) {
fh_put(tempfh);
kfree(tempfh);
@@ -3514,12 +3671,28 @@ __be32 nfsd4_encode_fattr_to_buf(__be32 **p, int words,
__be32 ret;
svcxdr_init_encode_from_buffer(&xdr, &dummy, *p, words << 2);
- ret = nfsd4_encode_fattr(&xdr, fhp, exp, dentry, bmval, rqstp,
- ignore_crossmnt);
+ ret = nfsd4_encode_fattr4(rqstp, &xdr, fhp, exp, dentry, bmval,
+ ignore_crossmnt);
*p = xdr.p;
return ret;
}
+/*
+ * The buffer space for this field was reserved during a previous
+ * call to nfsd4_encode_entry4().
+ */
+static void nfsd4_encode_entry4_nfs_cookie4(const struct nfsd4_readdir *readdir,
+ u64 offset)
+{
+ __be64 cookie = cpu_to_be64(offset);
+ struct xdr_stream *xdr = readdir->xdr;
+
+ if (!readdir->cookie_offset)
+ return;
+ write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset, &cookie,
+ sizeof(cookie));
+}
+
static inline int attributes_need_mount(u32 *bmval)
{
if (bmval[0] & ~(FATTR4_WORD0_RDATTR_ERROR | FATTR4_WORD0_LEASE_TIME))
@@ -3530,8 +3703,8 @@ static inline int attributes_need_mount(u32 *bmval)
}
static __be32
-nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
- const char *name, int namlen)
+nfsd4_encode_entry4_fattr(struct nfsd4_readdir *cd, const char *name,
+ int namlen)
{
struct svc_export *exp = cd->rd_fhp->fh_export;
struct dentry *dentry;
@@ -3574,33 +3747,34 @@ nfsd4_encode_dirent_fattr(struct xdr_stream *xdr, struct nfsd4_readdir *cd,
}
out_encode:
- nfserr = nfsd4_encode_fattr(xdr, NULL, exp, dentry, cd->rd_bmval,
- cd->rd_rqstp, ignore_crossmnt);
+ nfserr = nfsd4_encode_fattr4(cd->rd_rqstp, cd->xdr, NULL, exp, dentry,
+ cd->rd_bmval, ignore_crossmnt);
out_put:
dput(dentry);
exp_put(exp);
return nfserr;
}
-static __be32 *
-nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
+static __be32
+nfsd4_encode_entry4_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
{
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return NULL;
- *p++ = htonl(2);
- *p++ = htonl(FATTR4_WORD0_RDATTR_ERROR); /* bmval0 */
- *p++ = htonl(0); /* bmval1 */
+ __be32 status;
- *p++ = htonl(4); /* attribute length */
- *p++ = nfserr; /* no htonl */
- return p;
+ /* attrmask */
+ status = nfsd4_encode_bitmap4(xdr, FATTR4_WORD0_RDATTR_ERROR, 0, 0);
+ if (status != nfs_ok)
+ return status;
+ /* attr_vals */
+ if (xdr_stream_encode_u32(xdr, XDR_UNIT) != XDR_UNIT)
+ return nfserr_resource;
+ /* rdattr_error */
+ if (xdr_stream_encode_be32(xdr, nfserr) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
}
static int
-nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
+nfsd4_encode_entry4(void *ccdv, const char *name, int namlen,
loff_t offset, u64 ino, unsigned int d_type)
{
struct readdir_cd *ccd = ccdv;
@@ -3611,8 +3785,6 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
u32 name_and_cookie;
int entry_bytes;
__be32 nfserr = nfserr_toosmall;
- __be64 wire_offset;
- __be32 *p;
	/* In nfsv4, "." and ".." never make it onto the wire. */
if (name && isdotent(name, namlen)) {
@@ -3620,24 +3792,19 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
return 0;
}
- if (cd->cookie_offset) {
- wire_offset = cpu_to_be64(offset);
- write_bytes_to_xdr_buf(xdr->buf, cd->cookie_offset,
- &wire_offset, 8);
- }
+ /* Encode the previous entry's cookie value */
+ nfsd4_encode_entry4_nfs_cookie4(cd, offset);
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ if (xdr_stream_encode_item_present(xdr) != XDR_UNIT)
goto fail;
- *p++ = xdr_one; /* mark entry present */
+
+ /* Reserve send buffer space for this entry's cookie value. */
cookie_offset = xdr->buf->len;
- p = xdr_reserve_space(xdr, 3*4 + namlen);
- if (!p)
+ if (nfsd4_encode_nfs_cookie4(xdr, OFFSET_MAX) != nfs_ok)
goto fail;
- p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */
- p = xdr_encode_array(p, name, namlen); /* name length & name */
-
- nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
+ if (nfsd4_encode_component4(xdr, name, namlen) != nfs_ok)
+ goto fail;
+ nfserr = nfsd4_encode_entry4_fattr(cd, name, namlen);
switch (nfserr) {
case nfs_ok:
break;
@@ -3668,8 +3835,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
*/
if (!(cd->rd_bmval[0] & FATTR4_WORD0_RDATTR_ERROR))
goto fail;
- p = nfsd4_encode_rdattr_error(xdr, nfserr);
- if (p == NULL) {
+ if (nfsd4_encode_entry4_rdattr_error(xdr, nfserr) != nfs_ok) {
nfserr = nfserr_toosmall;
goto fail;
}
@@ -3727,18 +3893,26 @@ nfsd4_encode_clientid4(struct xdr_stream *xdr, const clientid_t *clientid)
return nfs_ok;
}
+/* This is a frequently-encoded item; open-coded for speed */
static __be32
-nfsd4_encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+nfsd4_encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
{
__be32 *p;
- p = xdr_reserve_space(xdr, sizeof(stateid_t));
+ p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(sid->si_generation);
- p = xdr_encode_opaque_fixed(p, &sid->si_opaque,
- sizeof(stateid_opaque_t));
- return 0;
+ memcpy(p, &sid->si_opaque, sizeof(sid->si_opaque));
+ return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_sessionid4(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sessionid)
+{
+ return nfsd4_encode_opaque_fixed(xdr, sessionid->data,
+ NFS4_MAX_SESSIONID_LEN);
}
static __be32
@@ -3747,14 +3921,14 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_access *access = &u->access;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(access->ac_supported);
- *p++ = cpu_to_be32(access->ac_resp_access);
- return 0;
+ /* supported */
+ status = nfsd4_encode_uint32_t(xdr, access->ac_supported);
+ if (status != nfs_ok)
+ return status;
+ /* access */
+ return nfsd4_encode_uint32_t(xdr, access->ac_resp_access);
}
static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr,
@@ -3762,17 +3936,16 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp,
{
struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 8);
- if (!p)
+ /* bctsr_sessid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &bcts->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* bctsr_dir */
+ if (xdr_stream_encode_u32(xdr, bcts->dir) != XDR_UNIT)
return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, bcts->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(bcts->dir);
- /* Upshifting from TCP to RDMA is not supported */
- *p++ = cpu_to_be32(0);
- return 0;
+ /* bctsr_use_conn_in_rdma_mode */
+ return nfsd4_encode_bool(xdr, false);
}
static __be32
@@ -3782,7 +3955,8 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_close *close = &u->close;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &close->cl_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &close->cl_stateid);
}
@@ -3802,11 +3976,13 @@ nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_create *create = &u->create;
struct xdr_stream *xdr = resp->xdr;
+ /* cinfo */
nfserr = nfsd4_encode_change_info4(xdr, &create->cr_cinfo);
if (nfserr)
return nfserr;
- return nfsd4_encode_bitmap(xdr, create->cr_bmval[0],
- create->cr_bmval[1], create->cr_bmval[2]);
+ /* attrset */
+ return nfsd4_encode_bitmap4(xdr, create->cr_bmval[0],
+ create->cr_bmval[1], create->cr_bmval[2]);
}
static __be32
@@ -3817,65 +3993,56 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, __be32 nfserr,
struct svc_fh *fhp = getattr->ga_fhp;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_fattr(xdr, fhp, fhp->fh_export, fhp->fh_dentry,
- getattr->ga_bmval, resp->rqstp, 0);
+ /* obj_attributes */
+ return nfsd4_encode_fattr4(resp->rqstp, xdr, fhp, fhp->fh_export,
+ fhp->fh_dentry, getattr->ga_bmval, 0);
}
static __be32
nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
- struct svc_fh **fhpp = &u->getfh;
struct xdr_stream *xdr = resp->xdr;
- struct svc_fh *fhp = *fhpp;
- unsigned int len;
- __be32 *p;
+ struct svc_fh *fhp = u->getfh;
- len = fhp->fh_handle.fh_size;
- p = xdr_reserve_space(xdr, len + 4);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque(p, &fhp->fh_handle.fh_raw, len);
- return 0;
+ /* object */
+ return nfsd4_encode_nfs_fh4(xdr, &fhp->fh_handle);
}
-/*
-* Including all fields other than the name, a LOCK4denied structure requires
-* 8(clientid) + 4(namelen) + 8(offset) + 8(length) + 4(type) = 32 bytes.
-*/
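+/*
+ * Encoder for the XDR lock_owner4 type:
+ *
+ *	struct lock_owner4 {
+ *		clientid4	clientid;
+ *		opaque		owner<NFS4_OPAQUE_LIMIT>;
+ *	};
+ */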
static __be32
-nfsd4_encode_lock_denied(struct xdr_stream *xdr, struct nfsd4_lock_denied *ld)
+nfsd4_encode_lock_owner4(struct xdr_stream *xdr, const clientid_t *clientid,
+ const struct xdr_netobj *owner)
{
- struct xdr_netobj *conf = &ld->ld_owner;
- __be32 *p;
+ __be32 status;
-again:
- p = xdr_reserve_space(xdr, 32 + XDR_LEN(conf->len));
- if (!p) {
- /*
- * Don't fail to return the result just because we can't
- * return the conflicting open:
- */
- if (conf->len) {
- kfree(conf->data);
- conf->len = 0;
- conf->data = NULL;
- goto again;
- }
+ /* clientid */
+ status = nfsd4_encode_clientid4(xdr, clientid);
+ if (status != nfs_ok)
+ return status;
+ /* owner */
+ return nfsd4_encode_opaque(xdr, owner->data, owner->len);
+}
+
+static __be32
+nfsd4_encode_lock4denied(struct xdr_stream *xdr,
+ const struct nfsd4_lock_denied *ld)
+{
+ __be32 status;
+
+ /* offset */
+ status = nfsd4_encode_offset4(xdr, ld->ld_start);
+ if (status != nfs_ok)
+ return status;
+ /* length */
+ status = nfsd4_encode_length4(xdr, ld->ld_length);
+ if (status != nfs_ok)
+ return status;
+ /* locktype */
+ if (xdr_stream_encode_u32(xdr, ld->ld_type) != XDR_UNIT)
return nfserr_resource;
- }
- p = xdr_encode_hyper(p, ld->ld_start);
- p = xdr_encode_hyper(p, ld->ld_length);
- *p++ = cpu_to_be32(ld->ld_type);
- if (conf->len) {
- p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
- p = xdr_encode_opaque(p, conf->data, conf->len);
- kfree(conf->data);
- } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
- p = xdr_encode_hyper(p, (u64)0); /* clientid */
- *p++ = cpu_to_be32(0); /* length of owner name */
- }
- return nfserr_denied;
+ /* owner */
+ return nfsd4_encode_lock_owner4(xdr, &ld->ld_clientid,
+ &ld->ld_owner);
}
static __be32
@@ -3884,13 +4051,21 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_lock *lock = &u->lock;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- if (!nfserr)
- nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
- else if (nfserr == nfserr_denied)
- nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-
- return nfserr;
+ switch (nfserr) {
+ case nfs_ok:
+ /* resok4 */
+ status = nfsd4_encode_stateid4(xdr, &lock->lk_resp_stateid);
+ break;
+ case nfserr_denied:
+ /* denied */
+ status = nfsd4_encode_lock4denied(xdr, &lock->lk_denied);
+ break;
+ default:
+ return nfserr;
+ }
+ return status != nfs_ok ? status : nfserr;
}
static __be32
@@ -3899,9 +4074,14 @@ nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_lockt *lockt = &u->lockt;
struct xdr_stream *xdr = resp->xdr;
+ __be32 status;
- if (nfserr == nfserr_denied)
- nfsd4_encode_lock_denied(xdr, &lockt->lt_denied);
+ if (nfserr == nfserr_denied) {
+ /* denied */
+ status = nfsd4_encode_lock4denied(xdr, &lockt->lt_denied);
+ if (status != nfs_ok)
+ return status;
+ }
return nfserr;
}
@@ -3912,7 +4092,8 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_locku *locku = &u->locku;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &locku->lu_stateid);
+ /* lock_stateid */
+ return nfsd4_encode_stateid4(xdr, &locku->lu_stateid);
}
@@ -3926,104 +4107,159 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfsd4_encode_change_info4(xdr, &link->li_cinfo);
}
+/*
+ * This implementation does not yet support returning an ACE in an
+ * OPEN that offers a delegation.
+ */
+static __be32
+nfsd4_encode_open_nfsace4(struct xdr_stream *xdr)
+{
+ __be32 status;
+
+ /* type */
+ status = nfsd4_encode_acetype4(xdr, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* flag */
+ status = nfsd4_encode_aceflag4(xdr, 0);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* access mask */
+ status = nfsd4_encode_acemask4(xdr, 0);
+ if (status != nfs_ok)
+ return nfserr_resource;
+ /* who - empty for now */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+}
static __be32
-nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_open_read_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
{
- struct nfsd4_open *open = &u->open;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- nfserr = nfsd4_encode_stateid(xdr, &open->op_stateid);
- if (nfserr)
- return nfserr;
- nfserr = nfsd4_encode_change_info4(xdr, &open->op_cinfo);
- if (nfserr)
- return nfserr;
- if (xdr_stream_encode_u32(xdr, open->op_rflags) < 0)
+ /* stateid */
+ status = nfsd4_encode_stateid4(xdr, &open->op_delegate_stateid);
+ if (status != nfs_ok)
+ return status;
+ /* recall */
+ status = nfsd4_encode_bool(xdr, open->op_recall);
+ if (status != nfs_ok)
+ return status;
+ /* permissions */
+ return nfsd4_encode_open_nfsace4(xdr);
+}
+
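+/*
+ * Encoder for the XDR nfs_space_limit4 union, fixed to the
+ * NFS_LIMIT_SIZE arm:
+ *
+ *	union nfs_space_limit4 switch (limit_by4 limitby) {
+ *	case NFS_LIMIT_SIZE:
+ *		uint64_t	filesize;
+ *	case NFS_LIMIT_BLOCKS:
+ *		nfs_modified_limit4	mod_blocks;
+ *	};
+ */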
+static __be32
+nfsd4_encode_nfs_space_limit4(struct xdr_stream *xdr, u64 filesize)
+{
+ /* limitby */
+ if (xdr_stream_encode_u32(xdr, NFS4_LIMIT_SIZE) != XDR_UNIT)
return nfserr_resource;
+ /* filesize */
+ return nfsd4_encode_uint64_t(xdr, filesize);
+}
- nfserr = nfsd4_encode_bitmap(xdr, open->op_bmval[0], open->op_bmval[1],
- open->op_bmval[2]);
- if (nfserr)
- return nfserr;
+static __be32
+nfsd4_encode_open_write_delegation4(struct xdr_stream *xdr,
+ struct nfsd4_open *open)
+{
+ __be32 status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* stateid */
+ status = nfsd4_encode_stateid4(xdr, &open->op_delegate_stateid);
+ if (status != nfs_ok)
+ return status;
+ /* recall */
+ status = nfsd4_encode_bool(xdr, open->op_recall);
+ if (status != nfs_ok)
+ return status;
+ /* space_limit */
+ status = nfsd4_encode_nfs_space_limit4(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ return nfsd4_encode_open_nfsace4(xdr);
+}
+
+static __be32
+nfsd4_encode_open_none_delegation4(struct xdr_stream *xdr,
+ struct nfsd4_open *open)
+{
+ __be32 status = nfs_ok;
+
+ /* ond_why */
+ if (xdr_stream_encode_u32(xdr, open->op_why_no_deleg) != XDR_UNIT)
return nfserr_resource;
+ switch (open->op_why_no_deleg) {
+ case WND4_CONTENTION:
+ /* ond_server_will_push_deleg */
+ status = nfsd4_encode_bool(xdr, false);
+ break;
+ case WND4_RESOURCE:
+ /* ond_server_will_signal_avail */
+ status = nfsd4_encode_bool(xdr, false);
+ }
+ return status;
+}
+
+static __be32
+nfsd4_encode_open_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open)
+{
+ __be32 status;
- *p++ = cpu_to_be32(open->op_delegate_type);
+ /* delegation_type */
+ if (xdr_stream_encode_u32(xdr, open->op_delegate_type) != XDR_UNIT)
+ return nfserr_resource;
switch (open->op_delegate_type) {
case NFS4_OPEN_DELEGATE_NONE:
+ status = nfs_ok;
break;
case NFS4_OPEN_DELEGATE_READ:
- nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
- if (nfserr)
- return nfserr;
- p = xdr_reserve_space(xdr, 20);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_recall);
-
- /*
- * TODO: ACE's in delegations
- */
- *p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
+ /* read */
+ status = nfsd4_encode_open_read_delegation4(xdr, open);
break;
case NFS4_OPEN_DELEGATE_WRITE:
- nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
- if (nfserr)
- return nfserr;
-
- p = xdr_reserve_space(xdr, XDR_UNIT * 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_recall);
-
- /*
- * Always flush on close
- *
- * TODO: space_limit's in delegations
- */
- *p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
- *p++ = xdr_zero;
- *p++ = xdr_zero;
-
- /*
- * TODO: ACE's in delegations
- */
- *p++ = cpu_to_be32(NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0); /* XXX: is NULL principal ok? */
+ /* write */
+ status = nfsd4_encode_open_write_delegation4(xdr, open);
break;
- case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
- switch (open->op_why_no_deleg) {
- case WND4_CONTENTION:
- case WND4_RESOURCE:
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_why_no_deleg);
- /* deleg signaling not supported yet: */
- *p++ = cpu_to_be32(0);
- break;
- default:
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(open->op_why_no_deleg);
- }
+ case NFS4_OPEN_DELEGATE_NONE_EXT:
+ /* od_whynone */
+ status = nfsd4_encode_open_none_delegation4(xdr, open);
break;
default:
- BUG();
+ status = nfserr_serverfault;
}
- /* XXX save filehandle here */
- return 0;
+
+ return status;
+}
+
+static __be32
+nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_open *open = &u->open;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &open->op_stateid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* cinfo */
+ nfserr = nfsd4_encode_change_info4(xdr, &open->op_cinfo);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* rflags */
+ nfserr = nfsd4_encode_uint32_t(xdr, open->op_rflags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* attrset */
+ nfserr = nfsd4_encode_bitmap4(xdr, open->op_bmval[0],
+ open->op_bmval[1], open->op_bmval[2]);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* delegation */
+ return nfsd4_encode_open_delegation4(xdr, open);
}
static __be32
@@ -4033,7 +4269,8 @@ nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_open_confirm *oc = &u->open_confirm;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &oc->oc_resp_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &oc->oc_resp_stateid);
}
static __be32
@@ -4043,7 +4280,8 @@ nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_open_downgrade *od = &u->open_downgrade;
struct xdr_stream *xdr = resp->xdr;
- return nfsd4_encode_stateid(xdr, &od->od_stateid);
+ /* open_stateid */
+ return nfsd4_encode_stateid4(xdr, &od->od_stateid);
}
/*
@@ -4227,90 +4465,83 @@ out_err:
return nfserr;
}
-static __be32
-nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
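+/*
+ * Encoder for the XDR dirlist4 type: the returned entries,
+ * terminated by an absent-item marker, followed by the EOF flag:
+ *
+ *	struct dirlist4 {
+ *		entry4		*entries;
+ *		bool		eof;
+ *	};
+ */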
+static __be32 nfsd4_encode_dirlist4(struct xdr_stream *xdr,
+ struct nfsd4_readdir *readdir,
+ u32 max_payload)
{
- struct nfsd4_readdir *readdir = &u->readdir;
- int maxcount;
- int bytes_left;
+ int bytes_left, maxcount, starting_len = xdr->buf->len;
loff_t offset;
- __be64 wire_offset;
- struct xdr_stream *xdr = resp->xdr;
- int starting_len = xdr->buf->len;
- __be32 *p;
-
- nfserr = nfsd4_encode_verifier4(xdr, &readdir->rd_verf);
- if (nfserr != nfs_ok)
- return nfserr;
+ __be32 status;
/*
* Number of bytes left for directory entries allowing for the
- * final 8 bytes of the readdir and a following failed op:
+ * final 8 bytes of the readdir and a following failed op.
*/
- bytes_left = xdr->buf->buflen - xdr->buf->len
- - COMPOUND_ERR_SLACK_SPACE - 8;
- if (bytes_left < 0) {
- nfserr = nfserr_resource;
- goto err_no_verf;
- }
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
+ bytes_left = xdr->buf->buflen - xdr->buf->len -
+ COMPOUND_ERR_SLACK_SPACE - XDR_UNIT * 2;
+ if (bytes_left < 0)
+ return nfserr_resource;
+ maxcount = min_t(u32, readdir->rd_maxcount, max_payload);
+
/*
- * Note the rfc defines rd_maxcount as the size of the
- * READDIR4resok structure, which includes the verifier above
- * and the 8 bytes encoded at the end of this function:
+ * The RFC defines rd_maxcount as the size of the
+ * READDIR4resok structure, which includes the verifier
+ * and the 8 bytes encoded at the end of this function.
*/
- if (maxcount < 16) {
- nfserr = nfserr_toosmall;
- goto err_no_verf;
- }
- maxcount = min_t(int, maxcount-16, bytes_left);
+ if (maxcount < XDR_UNIT * 4)
+ return nfserr_toosmall;
+ maxcount = min_t(int, maxcount - XDR_UNIT * 4, bytes_left);
- /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+ /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0 */
if (!readdir->rd_dircount)
- readdir->rd_dircount = svc_max_payload(resp->rqstp);
+ readdir->rd_dircount = max_payload;
+ /* *entries */
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
readdir->common.err = 0;
readdir->cookie_offset = 0;
-
offset = readdir->rd_cookie;
- nfserr = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp,
- &offset,
- &readdir->common, nfsd4_encode_dirent);
- if (nfserr == nfs_ok &&
- readdir->common.err == nfserr_toosmall &&
- xdr->buf->len == starting_len + 8) {
- /* nothing encoded; which limit did we hit?: */
- if (maxcount - 16 < bytes_left)
- /* It was the fault of rd_maxcount: */
- nfserr = nfserr_toosmall;
- else
- /* We ran out of buffer space: */
- nfserr = nfserr_resource;
+ status = nfsd_readdir(readdir->rd_rqstp, readdir->rd_fhp, &offset,
+ &readdir->common, nfsd4_encode_entry4);
+ if (status)
+ return status;
+ if (readdir->common.err == nfserr_toosmall &&
+ xdr->buf->len == starting_len) {
+ /* No entries were encoded. Which limit did we hit? */
+ if (maxcount - XDR_UNIT * 4 < bytes_left)
+ /* It was the fault of rd_maxcount */
+ return nfserr_toosmall;
+ /* We ran out of buffer space */
+ return nfserr_resource;
}
- if (nfserr)
- goto err_no_verf;
+ /* Encode the final entry's cookie value */
+ nfsd4_encode_entry4_nfs_cookie4(readdir, offset);
+ /* No entries follow */
+ if (xdr_stream_encode_item_absent(xdr) != XDR_UNIT)
+ return nfserr_resource;
- if (readdir->cookie_offset) {
- wire_offset = cpu_to_be64(offset);
- write_bytes_to_xdr_buf(xdr->buf, readdir->cookie_offset,
- &wire_offset, 8);
- }
+ /* eof */
+ return nfsd4_encode_bool(xdr, readdir->common.err == nfserr_eof);
+}
- p = xdr_reserve_space(xdr, 8);
- if (!p) {
- WARN_ON_ONCE(1);
- goto err_no_verf;
- }
- *p++ = 0; /* no more entries */
- *p++ = htonl(readdir->common.err == nfserr_eof);
+static __be32
+nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_readdir *readdir = &u->readdir;
+ struct xdr_stream *xdr = resp->xdr;
+ int starting_len = xdr->buf->len;
- return 0;
-err_no_verf:
- xdr_truncate_encode(xdr, starting_len);
+ /* cookieverf */
+ nfserr = nfsd4_encode_verifier4(xdr, &readdir->rd_verf);
+ if (nfserr != nfs_ok)
+ return nfserr;
+
+ /* reply */
+ nfserr = nfsd4_encode_dirlist4(xdr, readdir, svc_max_payload(resp->rqstp));
+ if (nfserr != nfs_ok)
+ xdr_truncate_encode(xdr, starting_len);
return nfserr;
}
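
[Note: the XDR_UNIT arithmetic above decomposes the old magic numbers; this is a reading of the code, not a behavior change:

	XDR_UNIT * 4 == 16 bytes, the old "maxcount < 16" floor:
	    cookieverf              8 bytes (2 words, encoded by the caller)
	    "entries follow" flag   4 bytes (1 word)
	    eof                     4 bytes (1 word)

	XDR_UNIT * 2 == the trailing 8 bytes reserved in bytes_left.
]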
@@ -4338,13 +4569,34 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr,
}
static __be32
+nfsd4_encode_rpcsec_gss_info(struct xdr_stream *xdr,
+ struct rpcsec_gss_info *info)
+{
+ __be32 status;
+
+ /* oid */
+ if (xdr_stream_encode_opaque(xdr, info->oid.data, info->oid.len) < 0)
+ return nfserr_resource;
+ /* qop */
+ status = nfsd4_encode_qop4(xdr, info->qop);
+ if (status != nfs_ok)
+ return status;
+ /* service */
+ if (xdr_stream_encode_u32(xdr, info->service) != XDR_UNIT)
+ return nfserr_resource;
+
+ return nfs_ok;
+}
+
+static __be32
nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
{
u32 i, nflavs, supported;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- __be32 *p, *flavorsp;
static bool report = true;
+ __be32 *flavorsp;
+ __be32 status;
if (exp->ex_nflavors) {
flavs = exp->ex_flavors;
@@ -4367,10 +4619,9 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
}
supported = 0;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ flavorsp = xdr_reserve_space(xdr, XDR_UNIT);
+ if (!flavorsp)
return nfserr_resource;
- flavorsp = p++; /* to be backfilled later */
for (i = 0; i < nflavs; i++) {
rpc_authflavor_t pf = flavs[i].pseudoflavor;
@@ -4378,20 +4629,22 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
if (rpcauth_get_gssinfo(pf, &info) == 0) {
supported++;
- p = xdr_reserve_space(xdr, 4 + 4 +
- XDR_LEN(info.oid.len) + 4 + 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(RPC_AUTH_GSS);
- p = xdr_encode_opaque(p, info.oid.data, info.oid.len);
- *p++ = cpu_to_be32(info.qop);
- *p++ = cpu_to_be32(info.service);
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS);
+ if (status != nfs_ok)
+ return status;
+ /* flavor_info */
+ status = nfsd4_encode_rpcsec_gss_info(xdr, &info);
+ if (status != nfs_ok)
+ return status;
} else if (pf < RPC_AUTH_MAXFLAVOR) {
supported++;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(pf);
+
+ /* flavor */
+ status = nfsd4_encode_uint32_t(xdr, pf);
+ if (status != nfs_ok)
+ return status;
} else {
if (report)
pr_warn("NFS: SECINFO: security flavor %u "
@@ -4401,7 +4654,7 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp)
if (nflavs != supported)
report = false;
- *flavorsp = htonl(supported);
+ *flavorsp = cpu_to_be32(supported);
return 0;
}
@@ -4425,34 +4678,25 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp);
}
-/*
- * The SETATTR encode routine is special -- it always encodes a bitmap,
- * regardless of the error status.
- */
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_setattr *setattr = &u->setattr;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- return nfserr_resource;
- if (nfserr) {
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- *p++ = cpu_to_be32(0);
- }
- else {
- *p++ = cpu_to_be32(3);
- *p++ = cpu_to_be32(setattr->sa_bmval[0]);
- *p++ = cpu_to_be32(setattr->sa_bmval[1]);
- *p++ = cpu_to_be32(setattr->sa_bmval[2]);
+ switch (nfserr) {
+ case nfs_ok:
+ /* attrsset */
+ status = nfsd4_encode_bitmap4(resp->xdr, setattr->sa_bmval[0],
+ setattr->sa_bmval[1],
+ setattr->sa_bmval[2]);
+ break;
+ default:
+ /* attrsset */
+ status = nfsd4_encode_bitmap4(resp->xdr, 0, 0, 0);
}
- return nfserr;
+ return status != nfs_ok ? status : nfserr;
}
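
[Note: SETATTR remains the odd one out, as the deleted comment used to say: it encodes an attrsset bitmap even when the operation failed (an all-zero bitmap in that case), and the final return hands back the operation's own status unless the bitmap encoding itself ran out of buffer space.]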
static __be32
@@ -4488,86 +4732,148 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
+ struct xdr_stream *xdr = resp->xdr;
- if (xdr_stream_encode_u32(resp->xdr, write->wr_bytes_written) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u32(resp->xdr, write->wr_how_written) < 0)
+ /* count */
+ nfserr = nfsd4_encode_count4(xdr, write->wr_bytes_written);
+ if (nfserr)
+ return nfserr;
+ /* committed */
+ if (xdr_stream_encode_u32(xdr, write->wr_how_written) != XDR_UNIT)
return nfserr_resource;
- return nfsd4_encode_verifier4(resp->xdr, &write->wr_verifier);
+ /* writeverf */
+ return nfsd4_encode_verifier4(xdr, &write->wr_verifier);
}
static __be32
-nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_state_protect_ops4(struct xdr_stream *xdr,
+ struct nfsd4_exchange_id *exid)
{
- struct nfsd4_exchange_id *exid = &u->exchange_id;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- char *major_id;
- char *server_scope;
- int major_id_sz;
- int server_scope_sz;
- uint64_t minor_id = 0;
- struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
+ __be32 status;
- major_id = nn->nfsd_name;
- major_id_sz = strlen(nn->nfsd_name);
- server_scope = nn->nfsd_name;
- server_scope_sz = strlen(nn->nfsd_name);
+ /* spo_must_enforce */
+ status = nfsd4_encode_bitmap4(xdr, exid->spo_must_enforce[0],
+ exid->spo_must_enforce[1],
+ exid->spo_must_enforce[2]);
+ if (status != nfs_ok)
+ return status;
+ /* spo_must_allow */
+ return nfsd4_encode_bitmap4(xdr, exid->spo_must_allow[0],
+ exid->spo_must_allow[1],
+ exid->spo_must_allow[2]);
+}
- if (nfsd4_encode_clientid4(xdr, &exid->clientid) != nfs_ok)
- return nfserr_resource;
- if (xdr_stream_encode_u32(xdr, exid->seqid) < 0)
- return nfserr_resource;
- if (xdr_stream_encode_u32(xdr, exid->flags) < 0)
- return nfserr_resource;
+static __be32
+nfsd4_encode_state_protect4_r(struct xdr_stream *xdr, struct nfsd4_exchange_id *exid)
+{
+ __be32 status;
- if (xdr_stream_encode_u32(xdr, exid->spa_how) < 0)
+ if (xdr_stream_encode_u32(xdr, exid->spa_how) != XDR_UNIT)
return nfserr_resource;
switch (exid->spa_how) {
case SP4_NONE:
+ status = nfs_ok;
break;
case SP4_MACH_CRED:
- /* spo_must_enforce bitmap: */
- nfserr = nfsd4_encode_bitmap(xdr,
- exid->spo_must_enforce[0],
- exid->spo_must_enforce[1],
- exid->spo_must_enforce[2]);
- if (nfserr)
- return nfserr;
- /* spo_must_allow bitmap: */
- nfserr = nfsd4_encode_bitmap(xdr,
- exid->spo_must_allow[0],
- exid->spo_must_allow[1],
- exid->spo_must_allow[2]);
- if (nfserr)
- return nfserr;
+ /* spr_mach_ops */
+ status = nfsd4_encode_state_protect_ops4(xdr, exid);
break;
default:
- WARN_ON_ONCE(1);
+ status = nfserr_serverfault;
}
+ return status;
+}
- p = xdr_reserve_space(xdr,
- 8 /* so_minor_id */ +
- 4 /* so_major_id.len */ +
- (XDR_QUADLEN(major_id_sz) * 4) +
- 4 /* eir_server_scope.len */ +
- (XDR_QUADLEN(server_scope_sz) * 4) +
- 4 /* eir_server_impl_id.count (0) */);
- if (!p)
+static __be32
+nfsd4_encode_server_owner4(struct xdr_stream *xdr, struct svc_rqst *rqstp)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ __be32 status;
+
+ /* so_minor_id */
+ status = nfsd4_encode_uint64_t(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ /* so_major_id */
+ return nfsd4_encode_opaque(xdr, nn->nfsd_name, strlen(nn->nfsd_name));
+}
+
+static __be32
+nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd_net *nn = net_generic(SVC_NET(resp->rqstp), nfsd_net_id);
+ struct nfsd4_exchange_id *exid = &u->exchange_id;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* eir_clientid */
+ nfserr = nfsd4_encode_clientid4(xdr, &exid->clientid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_sequenceid */
+ nfserr = nfsd4_encode_sequenceid4(xdr, exid->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, exid->flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_state_protect */
+ nfserr = nfsd4_encode_state_protect4_r(xdr, exid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_owner */
+ nfserr = nfsd4_encode_server_owner4(xdr, resp->rqstp);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_scope */
+ nfserr = nfsd4_encode_opaque(xdr, nn->nfsd_name,
+ strlen(nn->nfsd_name));
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* eir_server_impl_id<1> */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
- /* The server_owner struct */
- p = xdr_encode_hyper(p, minor_id); /* Minor id */
- /* major id */
- p = xdr_encode_opaque(p, major_id, major_id_sz);
+ return nfs_ok;
+}
- /* Server scope */
- p = xdr_encode_opaque(p, server_scope, server_scope_sz);
+static __be32
+nfsd4_encode_channel_attrs4(struct xdr_stream *xdr,
+ const struct nfsd4_channel_attrs *attrs)
+{
+ __be32 status;
- /* Implementation id */
- *p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
- return 0;
+ /* ca_headerpadsize */
+ status = nfsd4_encode_count4(xdr, 0);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxrequestsize */
+ status = nfsd4_encode_count4(xdr, attrs->maxreq_sz);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxresponsesize */
+ status = nfsd4_encode_count4(xdr, attrs->maxresp_sz);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxresponsesize_cached */
+ status = nfsd4_encode_count4(xdr, attrs->maxresp_cached);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxoperations */
+ status = nfsd4_encode_count4(xdr, attrs->maxops);
+ if (status != nfs_ok)
+ return status;
+ /* ca_maxrequests */
+ status = nfsd4_encode_count4(xdr, attrs->maxreqs);
+ if (status != nfs_ok)
+ return status;
+ /* ca_rdma_ird<1> */
+ if (xdr_stream_encode_u32(xdr, attrs->nr_rdma_attrs) != XDR_UNIT)
+ return nfserr_resource;
+ if (attrs->nr_rdma_attrs)
+ return nfsd4_encode_uint32_t(xdr, attrs->rdma_attrs);
+ return nfs_ok;
}
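
[Note: ca_rdma_ird<1> shows the recurring shape this patch uses for XDR optional values: a bounded array encoded as a one-word element count (0 or 1) followed by the element only when present. The same pattern appears below for eir_server_impl_id<1>, wr_callback_id<1>, and osr_complete<1>.]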
static __be32
@@ -4576,52 +4882,25 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_create_session *sess = &u->create_session;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 24);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, sess->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(sess->seqid);
- *p++ = cpu_to_be32(sess->flags);
- p = xdr_reserve_space(xdr, 28);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0); /* headerpadsz */
- *p++ = cpu_to_be32(sess->fore_channel.maxreq_sz);
- *p++ = cpu_to_be32(sess->fore_channel.maxresp_sz);
- *p++ = cpu_to_be32(sess->fore_channel.maxresp_cached);
- *p++ = cpu_to_be32(sess->fore_channel.maxops);
- *p++ = cpu_to_be32(sess->fore_channel.maxreqs);
- *p++ = cpu_to_be32(sess->fore_channel.nr_rdma_attrs);
-
- if (sess->fore_channel.nr_rdma_attrs) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(sess->fore_channel.rdma_attrs);
- }
-
- p = xdr_reserve_space(xdr, 28);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0); /* headerpadsz */
- *p++ = cpu_to_be32(sess->back_channel.maxreq_sz);
- *p++ = cpu_to_be32(sess->back_channel.maxresp_sz);
- *p++ = cpu_to_be32(sess->back_channel.maxresp_cached);
- *p++ = cpu_to_be32(sess->back_channel.maxops);
- *p++ = cpu_to_be32(sess->back_channel.maxreqs);
- *p++ = cpu_to_be32(sess->back_channel.nr_rdma_attrs);
-
- if (sess->back_channel.nr_rdma_attrs) {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(sess->back_channel.rdma_attrs);
- }
- return 0;
+ /* csr_sessionid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &sess->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_sequence */
+ nfserr = nfsd4_encode_sequenceid4(xdr, sess->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, sess->flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_fore_chan_attrs */
+ nfserr = nfsd4_encode_channel_attrs4(xdr, &sess->fore_channel);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* csr_back_chan_attrs */
+ return nfsd4_encode_channel_attrs4(xdr, &sess->back_channel);
}
static __be32
@@ -4630,22 +4909,35 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_sequence *seq = &u->sequence;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 20);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_opaque_fixed(p, seq->sessionid.data,
- NFS4_MAX_SESSIONID_LEN);
- *p++ = cpu_to_be32(seq->seqid);
- *p++ = cpu_to_be32(seq->slotid);
+ /* sr_sessionid */
+ nfserr = nfsd4_encode_sessionid4(xdr, &seq->sessionid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_sequenceid */
+ nfserr = nfsd4_encode_sequenceid4(xdr, seq->seqid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->slotid);
+ if (nfserr != nfs_ok)
+ return nfserr;
	/* Note: slotids are numbered from zero */
- *p++ = cpu_to_be32(seq->maxslots - 1); /* sr_highest_slotid */
- *p++ = cpu_to_be32(seq->maxslots - 1); /* sr_target_highest_slotid */
- *p++ = cpu_to_be32(seq->status_flags);
+ /* sr_highest_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_target_highest_slotid */
+ nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_status_flags */
+ nfserr = nfsd4_encode_uint32_t(xdr, seq->status_flags);
+ if (nfserr != nfs_ok)
+ return nfserr;
resp->cstate.data_offset = xdr->buf->len; /* DRC cache data pointer */
- return 0;
+ return nfs_ok;
}
static __be32
@@ -4653,125 +4945,132 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
- struct xdr_stream *xdr = resp->xdr;
struct nfsd4_test_stateid_id *stateid, *next;
- __be32 *p;
+ struct xdr_stream *xdr = resp->xdr;
- p = xdr_reserve_space(xdr, 4 + (4 * test_stateid->ts_num_ids));
- if (!p)
+ /* tsr_status_codes<> */
+ if (xdr_stream_encode_u32(xdr, test_stateid->ts_num_ids) != XDR_UNIT)
return nfserr_resource;
- *p++ = htonl(test_stateid->ts_num_ids);
-
- list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
- *p++ = stateid->ts_id_status;
+ list_for_each_entry_safe(stateid, next,
+ &test_stateid->ts_stateid_list, ts_id_list) {
+ if (xdr_stream_encode_be32(xdr, stateid->ts_id_status) != XDR_UNIT)
+ return nfserr_resource;
}
-
- return 0;
+ return nfs_ok;
}
#ifdef CONFIG_NFSD_PNFS
static __be32
-nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_device_addr4(struct xdr_stream *xdr,
+ const struct nfsd4_getdeviceinfo *gdev)
{
- struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
- struct xdr_stream *xdr = resp->xdr;
+ u32 needed_len, starting_len = xdr->buf->len;
const struct nfsd4_layout_ops *ops;
- u32 starting_len = xdr->buf->len, needed_len;
- __be32 *p;
+ __be32 status;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
+ /* da_layout_type */
+ if (xdr_stream_encode_u32(xdr, gdev->gd_layout_type) != XDR_UNIT)
return nfserr_resource;
-
- *p++ = cpu_to_be32(gdev->gd_layout_type);
-
+ /* da_addr_body */
ops = nfsd4_layout_ops[gdev->gd_layout_type];
- nfserr = ops->encode_getdeviceinfo(xdr, gdev);
- if (nfserr) {
+ status = ops->encode_getdeviceinfo(xdr, gdev);
+ if (status != nfs_ok) {
/*
- * We don't bother to burden the layout drivers with
- * enforcing gd_maxcount, just tell the client to
- * come back with a bigger buffer if it's not enough.
+ * Don't burden the layout drivers with enforcing
+ * gd_maxcount. Just tell the client to come back
+ * with a bigger buffer if it's not enough.
*/
- if (xdr->buf->len + 4 > gdev->gd_maxcount)
+ if (xdr->buf->len + XDR_UNIT > gdev->gd_maxcount)
goto toosmall;
- return nfserr;
+ return status;
}
- if (gdev->gd_notify_types) {
- p = xdr_reserve_space(xdr, 4 + 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1); /* bitmap length */
- *p++ = cpu_to_be32(gdev->gd_notify_types);
- } else {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = 0;
- }
+ return nfs_ok;
- return 0;
toosmall:
- dprintk("%s: maxcount too small\n", __func__);
- needed_len = xdr->buf->len + 4 /* notifications */;
+ needed_len = xdr->buf->len + XDR_UNIT; /* notifications */
xdr_truncate_encode(xdr, starting_len);
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(needed_len);
+
+ status = nfsd4_encode_count4(xdr, needed_len);
+ if (status != nfs_ok)
+ return status;
return nfserr_toosmall;
}
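
[Note: the toosmall path implements RFC 5661's GETDEVICEINFO contract: the partial encode is rolled back, the number of bytes that would have been needed is returned in place of the device address (gdir_mincount), and NFS4ERR_TOOSMALL tells the client to retry with a larger gd_maxcount.]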
static __be32
-nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
- struct nfsd4_layoutget *lgp = &u->layoutget;
+ struct nfsd4_getdeviceinfo *gdev = &u->getdeviceinfo;
struct xdr_stream *xdr = resp->xdr;
- const struct nfsd4_layout_ops *ops;
- __be32 *p;
-
- p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t));
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1); /* we always set return-on-close */
- *p++ = cpu_to_be32(lgp->lg_sid.si_generation);
- p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque,
- sizeof(stateid_opaque_t));
+ /* gdir_device_addr */
+ nfserr = nfsd4_encode_device_addr4(xdr, gdev);
+ if (nfserr)
+ return nfserr;
+ /* gdir_notification */
+ return nfsd4_encode_bitmap4(xdr, gdev->gd_notify_types, 0, 0);
+}
- *p++ = cpu_to_be32(1); /* we always return a single layout */
- p = xdr_encode_hyper(p, lgp->lg_seg.offset);
- p = xdr_encode_hyper(p, lgp->lg_seg.length);
- *p++ = cpu_to_be32(lgp->lg_seg.iomode);
- *p++ = cpu_to_be32(lgp->lg_layout_type);
+static __be32
+nfsd4_encode_layout4(struct xdr_stream *xdr, const struct nfsd4_layoutget *lgp)
+{
+ const struct nfsd4_layout_ops *ops = nfsd4_layout_ops[lgp->lg_layout_type];
+ __be32 status;
- ops = nfsd4_layout_ops[lgp->lg_layout_type];
+ /* lo_offset */
+ status = nfsd4_encode_offset4(xdr, lgp->lg_seg.offset);
+ if (status != nfs_ok)
+ return status;
+ /* lo_length */
+ status = nfsd4_encode_length4(xdr, lgp->lg_seg.length);
+ if (status != nfs_ok)
+ return status;
+ /* lo_iomode */
+ if (xdr_stream_encode_u32(xdr, lgp->lg_seg.iomode) != XDR_UNIT)
+ return nfserr_resource;
+ /* lo_content */
+ if (xdr_stream_encode_u32(xdr, lgp->lg_layout_type) != XDR_UNIT)
+ return nfserr_resource;
return ops->encode_layoutget(xdr, lgp);
}
static __be32
+nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_layoutget *lgp = &u->layoutget;
+ struct xdr_stream *xdr = resp->xdr;
+
+ /* logr_return_on_close */
+ nfserr = nfsd4_encode_bool(xdr, true);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* logr_stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &lgp->lg_sid);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* logr_layout<> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ return nfsd4_encode_layout4(xdr, lgp);
+}
+
+static __be32
nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(lcp->lc_size_chg);
- if (lcp->lc_size_chg) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- p = xdr_encode_hyper(p, lcp->lc_newsize);
- }
-
- return 0;
+ /* ns_sizechanged */
+ nfserr = nfsd4_encode_bool(xdr, lcp->lc_size_chg);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ if (lcp->lc_size_chg)
+ /* ns_size */
+ return nfsd4_encode_length4(xdr, lcp->lc_newsize);
+ return nfs_ok;
}
static __be32
@@ -4780,103 +5079,108 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_layoutreturn *lrp = &u->layoutreturn;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(lrp->lrs_present);
+ /* lrs_present */
+ nfserr = nfsd4_encode_bool(xdr, lrp->lrs_present);
+ if (nfserr != nfs_ok)
+ return nfserr;
if (lrp->lrs_present)
- return nfsd4_encode_stateid(xdr, &lrp->lr_sid);
- return 0;
+ /* lrs_stateid */
+ return nfsd4_encode_stateid4(xdr, &lrp->lr_sid);
+ return nfs_ok;
}
#endif /* CONFIG_NFSD_PNFS */
static __be32
-nfsd42_encode_write_res(struct nfsd4_compoundres *resp,
- struct nfsd42_write_res *write, bool sync)
+nfsd4_encode_write_response4(struct xdr_stream *xdr,
+ const struct nfsd4_copy *copy)
{
- __be32 *p;
- p = xdr_reserve_space(resp->xdr, 4);
- if (!p)
- return nfserr_resource;
+ const struct nfsd42_write_res *write = &copy->cp_res;
+ u32 count = nfsd4_copy_is_sync(copy) ? 0 : 1;
+ __be32 status;
- if (sync)
- *p++ = cpu_to_be32(0);
- else {
- __be32 nfserr;
- *p++ = cpu_to_be32(1);
- nfserr = nfsd4_encode_stateid(resp->xdr, &write->cb_stateid);
- if (nfserr)
- return nfserr;
+ /* wr_callback_id<1> */
+ if (xdr_stream_encode_u32(xdr, count) != XDR_UNIT)
+ return nfserr_resource;
+ if (count) {
+ status = nfsd4_encode_stateid4(xdr, &write->cb_stateid);
+ if (status != nfs_ok)
+ return status;
}
- p = xdr_reserve_space(resp->xdr, 8 + 4 + NFS4_VERIFIER_SIZE);
- if (!p)
+
+ /* wr_count */
+ status = nfsd4_encode_length4(xdr, write->wr_bytes_written);
+ if (status != nfs_ok)
+ return status;
+ /* wr_committed */
+ if (xdr_stream_encode_u32(xdr, write->wr_stable_how) != XDR_UNIT)
return nfserr_resource;
+ /* wr_writeverf */
+ return nfsd4_encode_verifier4(xdr, &write->wr_verifier);
+}
- p = xdr_encode_hyper(p, write->wr_bytes_written);
- *p++ = cpu_to_be32(write->wr_stable_how);
- p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
- NFS4_VERIFIER_SIZE);
- return nfs_ok;
+static __be32 nfsd4_encode_copy_requirements4(struct xdr_stream *xdr,
+ const struct nfsd4_copy *copy)
+{
+ __be32 status;
+
+ /* cr_consecutive */
+ status = nfsd4_encode_bool(xdr, true);
+ if (status != nfs_ok)
+ return status;
+ /* cr_synchronous */
+ return nfsd4_encode_bool(xdr, nfsd4_copy_is_sync(copy));
}
static __be32
-nfsd42_encode_nl4_server(struct nfsd4_compoundres *resp, struct nl4_server *ns)
+nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- struct xdr_stream *xdr = resp->xdr;
- struct nfs42_netaddr *addr;
- __be32 *p;
+ struct nfsd4_copy *copy = &u->copy;
- p = xdr_reserve_space(xdr, 4);
- *p++ = cpu_to_be32(ns->nl4_type);
+ nfserr = nfsd4_encode_write_response4(resp->xdr, copy);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ return nfsd4_encode_copy_requirements4(resp->xdr, copy);
+}
+static __be32
+nfsd4_encode_netloc4(struct xdr_stream *xdr, const struct nl4_server *ns)
+{
+ __be32 status;
+
+ if (xdr_stream_encode_u32(xdr, ns->nl4_type) != XDR_UNIT)
+ return nfserr_resource;
switch (ns->nl4_type) {
case NL4_NETADDR:
- addr = &ns->u.nl4_addr;
-
- /* netid_len, netid, uaddr_len, uaddr (port included
- * in RPCBIND_MAXUADDRLEN)
- */
- p = xdr_reserve_space(xdr,
- 4 /* netid len */ +
- (XDR_QUADLEN(addr->netid_len) * 4) +
- 4 /* uaddr len */ +
- (XDR_QUADLEN(addr->addr_len) * 4));
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(addr->netid_len);
- p = xdr_encode_opaque_fixed(p, addr->netid,
- addr->netid_len);
- *p++ = cpu_to_be32(addr->addr_len);
- p = xdr_encode_opaque_fixed(p, addr->addr,
- addr->addr_len);
+ /* nl_addr */
+ status = nfsd4_encode_netaddr4(xdr, &ns->u.nl4_addr);
break;
default:
- WARN_ON_ONCE(ns->nl4_type != NL4_NETADDR);
- return nfserr_inval;
+ status = nfserr_serverfault;
}
-
- return 0;
+ return status;
}
static __be32
-nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
+nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
{
- struct nfsd4_copy *copy = &u->copy;
- __be32 *p;
+ struct nfsd4_copy_notify *cn = &u->copy_notify;
+ struct xdr_stream *xdr = resp->xdr;
- nfserr = nfsd42_encode_write_res(resp, &copy->cp_res,
- nfsd4_copy_is_sync(copy));
+ /* cnr_lease_time */
+ nfserr = nfsd4_encode_nfstime4(xdr, &cn->cpn_lease_time);
if (nfserr)
return nfserr;
-
- p = xdr_reserve_space(resp->xdr, 4 + 4);
- *p++ = xdr_one; /* cr_consecutive */
- *p = nfsd4_copy_is_sync(copy) ? xdr_one : xdr_zero;
- return 0;
+ /* cnr_stateid */
+ nfserr = nfsd4_encode_stateid4(xdr, &cn->cpn_cnr_stateid);
+ if (nfserr)
+ return nfserr;
+ /* cnr_source_server<> */
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ return nfsd4_encode_netloc4(xdr, cn->cpn_src);
}
static __be32
@@ -4885,14 +5189,15 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
{
struct nfsd4_offload_status *os = &u->offload_status;
struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
- p = xdr_reserve_space(xdr, 8 + 4);
- if (!p)
+ /* osr_count */
+ nfserr = nfsd4_encode_length4(xdr, os->count);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* osr_complete<1> */
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
- p = xdr_encode_hyper(p, os->count);
- *p++ = cpu_to_be32(0);
- return nfserr;
+ return nfs_ok;
}
static __be32
@@ -4970,53 +5275,18 @@ out:
}
static __be32
-nfsd4_encode_copy_notify(struct nfsd4_compoundres *resp, __be32 nfserr,
- union nfsd4_op_u *u)
-{
- struct nfsd4_copy_notify *cn = &u->copy_notify;
- struct xdr_stream *xdr = resp->xdr;
- __be32 *p;
-
- if (nfserr)
- return nfserr;
-
- /* 8 sec, 4 nsec */
- p = xdr_reserve_space(xdr, 12);
- if (!p)
- return nfserr_resource;
-
- /* cnr_lease_time */
- p = xdr_encode_hyper(p, cn->cpn_sec);
- *p++ = cpu_to_be32(cn->cpn_nsec);
-
- /* cnr_stateid */
- nfserr = nfsd4_encode_stateid(xdr, &cn->cpn_cnr_stateid);
- if (nfserr)
- return nfserr;
-
- /* cnr_src.nl_nsvr */
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
-
- *p++ = cpu_to_be32(1);
-
- nfserr = nfsd42_encode_nl4_server(resp, cn->cpn_src);
- return nfserr;
-}
-
-static __be32
nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
union nfsd4_op_u *u)
{
struct nfsd4_seek *seek = &u->seek;
- __be32 *p;
-
- p = xdr_reserve_space(resp->xdr, 4 + 8);
- *p++ = cpu_to_be32(seek->seek_eof);
- p = xdr_encode_hyper(p, seek->seek_pos);
+ struct xdr_stream *xdr = resp->xdr;
- return 0;
+ /* sr_eof */
+ nfserr = nfsd4_encode_bool(xdr, seek->seek_eof);
+ if (nfserr != nfs_ok)
+ return nfserr;
+ /* sr_offset */
+ return nfsd4_encode_offset4(xdr, seek->seek_pos);
}
static __be32
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7ed02fb88a36..3e15b72f421d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -26,6 +26,7 @@
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"
+#include "netlink.h"
/*
* We have a single directory with several nodes in it.
@@ -1132,7 +1133,7 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
/* Following advice from simple_fill_super documentation: */
inode->i_ino = iunique(sb, NFSD_MaxReserved);
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
@@ -1496,6 +1497,203 @@ static int create_proc_exports_entry(void)
unsigned int nfsd_net_id;
/**
+ * nfsd_nl_rpc_status_get_start - Prepare rpc_status_get dumpit
+ * @cb: netlink metadata and command arguments
+ *
+ * Return values:
+ * %0: The rpc_status_get command may proceed
+ * %-ENODEV: There is no NFSD running in this namespace
+ */
+int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb)
+{
+ struct nfsd_net *nn = net_generic(sock_net(cb->skb->sk), nfsd_net_id);
+ int ret = -ENODEV;
+
+ mutex_lock(&nfsd_mutex);
+ if (nn->nfsd_serv) {
+ svc_get(nn->nfsd_serv);
+ ret = 0;
+ }
+ mutex_unlock(&nfsd_mutex);
+
+ return ret;
+}
+
+static int nfsd_genl_rpc_status_compose_msg(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct nfsd_genl_rqstp *rqstp)
+{
+ void *hdr;
+ u32 i;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &nfsd_nl_family, 0, NFSD_CMD_RPC_STATUS_GET);
+ if (!hdr)
+ return -ENOBUFS;
+
+ if (nla_put_be32(skb, NFSD_A_RPC_STATUS_XID, rqstp->rq_xid) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_FLAGS, rqstp->rq_flags) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROG, rqstp->rq_prog) ||
+ nla_put_u32(skb, NFSD_A_RPC_STATUS_PROC, rqstp->rq_proc) ||
+ nla_put_u8(skb, NFSD_A_RPC_STATUS_VERSION, rqstp->rq_vers) ||
+ nla_put_s64(skb, NFSD_A_RPC_STATUS_SERVICE_TIME,
+ ktime_to_us(rqstp->rq_stime),
+ NFSD_A_RPC_STATUS_PAD))
+ return -ENOBUFS;
+
+ switch (rqstp->rq_saddr.sa_family) {
+ case AF_INET: {
+ const struct sockaddr_in *s_in, *d_in;
+
+ s_in = (const struct sockaddr_in *)&rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in *)&rqstp->rq_daddr;
+ if (nla_put_in_addr(skb, NFSD_A_RPC_STATUS_SADDR4,
+ s_in->sin_addr.s_addr) ||
+ nla_put_in_addr(skb, NFSD_A_RPC_STATUS_DADDR4,
+ d_in->sin_addr.s_addr) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT,
+ s_in->sin_port) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT,
+ d_in->sin_port))
+ return -ENOBUFS;
+ break;
+ }
+ case AF_INET6: {
+ const struct sockaddr_in6 *s_in, *d_in;
+
+ s_in = (const struct sockaddr_in6 *)&rqstp->rq_saddr;
+ d_in = (const struct sockaddr_in6 *)&rqstp->rq_daddr;
+ if (nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_SADDR6,
+ &s_in->sin6_addr) ||
+ nla_put_in6_addr(skb, NFSD_A_RPC_STATUS_DADDR6,
+ &d_in->sin6_addr) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_SPORT,
+ s_in->sin6_port) ||
+ nla_put_be16(skb, NFSD_A_RPC_STATUS_DPORT,
+ d_in->sin6_port))
+ return -ENOBUFS;
+ break;
+ }
+ }
+
+ for (i = 0; i < rqstp->rq_opcnt; i++)
+ if (nla_put_u32(skb, NFSD_A_RPC_STATUS_COMPOUND_OPS,
+ rqstp->rq_opnum[i]))
+ return -ENOBUFS;
+
+ genlmsg_end(skb, hdr);
+ return 0;
+}
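
[Note: each rqstp becomes one self-contained netlink message framed by genlmsg_put()/genlmsg_end(); the nla_put_*() helpers fail once the reply skb runs out of tailroom, and the resulting -ENOBUFS stops the dump loop below as soon as the buffer is full.]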
+
+/**
+ * nfsd_nl_rpc_status_get_dumpit - Handle rpc_status_get dumpit
+ * @skb: reply buffer
+ * @cb: netlink metadata and command arguments
+ *
+ * Returns the size of the reply or a negative errno.
+ */
+int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct nfsd_net *nn = net_generic(sock_net(skb->sk), nfsd_net_id);
+ int i, ret, rqstp_index = 0;
+
+ rcu_read_lock();
+
+ for (i = 0; i < nn->nfsd_serv->sv_nrpools; i++) {
+ struct svc_rqst *rqstp;
+
+ if (i < cb->args[0]) /* already consumed */
+ continue;
+
+ rqstp_index = 0;
+ list_for_each_entry_rcu(rqstp,
+ &nn->nfsd_serv->sv_pools[i].sp_all_threads,
+ rq_all) {
+ struct nfsd_genl_rqstp genl_rqstp;
+ unsigned int status_counter;
+
+ if (rqstp_index++ < cb->args[1]) /* already consumed */
+ continue;
+			/*
+			 * Read rq_status_counter with acquire semantics
+			 * before copying the rqstp fields. The counter is
+			 * set to an odd value to signal that the fields
+			 * are meaningful.
+			 */
+ status_counter =
+ smp_load_acquire(&rqstp->rq_status_counter);
+ if (!(status_counter & 1))
+ continue;
+
+ genl_rqstp.rq_xid = rqstp->rq_xid;
+ genl_rqstp.rq_flags = rqstp->rq_flags;
+ genl_rqstp.rq_vers = rqstp->rq_vers;
+ genl_rqstp.rq_prog = rqstp->rq_prog;
+ genl_rqstp.rq_proc = rqstp->rq_proc;
+ genl_rqstp.rq_stime = rqstp->rq_stime;
+ genl_rqstp.rq_opcnt = 0;
+ memcpy(&genl_rqstp.rq_daddr, svc_daddr(rqstp),
+ sizeof(struct sockaddr));
+ memcpy(&genl_rqstp.rq_saddr, svc_addr(rqstp),
+ sizeof(struct sockaddr));
+
+#ifdef CONFIG_NFSD_V4
+ if (rqstp->rq_vers == NFS4_VERSION &&
+ rqstp->rq_proc == NFSPROC4_COMPOUND) {
+ /* NFSv4 compound */
+ struct nfsd4_compoundargs *args;
+ int j;
+
+ args = rqstp->rq_argp;
+ genl_rqstp.rq_opcnt = args->opcnt;
+ for (j = 0; j < genl_rqstp.rq_opcnt; j++)
+ genl_rqstp.rq_opnum[j] =
+ args->ops[j].opnum;
+ }
+#endif /* CONFIG_NFSD_V4 */
+
+			/*
+			 * Re-read rq_status_counter: if it changed while
+			 * the fields were being copied, the rqstp was
+			 * recycled and this snapshot must be discarded.
+			 */
+ if (smp_load_acquire(&rqstp->rq_status_counter) !=
+ status_counter)
+ continue;
+
+ ret = nfsd_genl_rpc_status_compose_msg(skb, cb,
+ &genl_rqstp);
+ if (ret)
+ goto out;
+ }
+ }
+
+ cb->args[0] = i;
+ cb->args[1] = rqstp_index;
+ ret = skb->len;
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
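
[Note: the cb->args[] bookkeeping above is the standard netlink dump resumption idiom: the callback may be invoked repeatedly, once per reply skb, and the saved pool/thread indices tell the next invocation what has already been consumed. A stripped-down model of the idiom, with hypothetical nr_items()/compose_one() helpers that are not NFSD code:

	static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		long i;

		for (i = cb->args[0]; i < nr_items(); i++) {
			/* Stop when the current skb is full; the core
			 * sends it and calls us again with a fresh one. */
			if (compose_one(skb, cb, i) < 0)
				break;
		}
		cb->args[0] = i;	/* resume point for the next pass */
		return skb->len;	/* returning 0 terminates the dump */
	}
]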
+
+/**
+ * nfsd_nl_rpc_status_get_done - rpc_status_get dumpit post-processing
+ * @cb: netlink metadata and command arguments
+ *
+ * Return values:
+ * %0: Success
+ */
+int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
+{
+ mutex_lock(&nfsd_mutex);
+ nfsd_put(sock_net(cb->skb->sk));
+ mutex_unlock(&nfsd_mutex);
+
+ return 0;
+}
+
+/**
* nfsd_net_init - Prepare the nfsd_net portion of a new net namespace
* @net: a freshly-created network namespace
*
@@ -1589,6 +1787,10 @@ static int __init init_nfsd(void)
retval = register_filesystem(&nfsd_fs_type);
if (retval)
goto out_free_all;
+ retval = genl_register_family(&nfsd_nl_family);
+ if (retval)
+ goto out_free_all;
+
return 0;
out_free_all:
nfsd4_destroy_laundry_wq();
@@ -1613,6 +1815,7 @@ out_free_slabs:
static void __exit exit_nfsd(void)
{
+ genl_unregister_family(&nfsd_nl_family);
unregister_filesystem(&nfsd_fs_type);
nfsd4_destroy_laundry_wq();
unregister_cld_notifier();
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 11c14faa6c67..f5ff42f41ee7 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -62,6 +62,23 @@ struct readdir_cd {
__be32 err; /* 0, nfserr, or nfserr_eof */
};
+/* Maximum number of operations per session compound */
+#define NFSD_MAX_OPS_PER_COMPOUND 50
+
+struct nfsd_genl_rqstp {
+ struct sockaddr rq_daddr;
+ struct sockaddr rq_saddr;
+ unsigned long rq_flags;
+ ktime_t rq_stime;
+ __be32 rq_xid;
+ u32 rq_vers;
+ u32 rq_prog;
+ u32 rq_proc;
+
+ /* NFSv4 compound */
+ u32 rq_opcnt;
+ u32 rq_opnum[NFSD_MAX_OPS_PER_COMPOUND];
+};
extern struct svc_program nfsd_program;
extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4;
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 355bf0db3235..dbfa0ac13564 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -771,7 +771,7 @@ enum fsid_source fsid_source(const struct svc_fh *fhp)
* assume that the new change attr is always logged to stable storage in some
* fashion before the results can be seen.
*/
-u64 nfsd4_change_attribute(struct kstat *stat, struct inode *inode)
+u64 nfsd4_change_attribute(const struct kstat *stat, const struct inode *inode)
{
u64 chattr;
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 40426f899e76..6ebdf7ea27bf 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -293,7 +293,8 @@ static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
fhp->fh_pre_saved = false;
}
-u64 nfsd4_change_attribute(struct kstat *stat, struct inode *inode);
+u64 nfsd4_change_attribute(const struct kstat *stat,
+ const struct inode *inode);
__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp);
__be32 fh_fill_post_attrs(struct svc_fh *fhp);
__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index c7af1095f6b5..d6122bb2d167 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -572,7 +572,6 @@ static void nfsd_last_thread(struct net *net)
return;
nfsd_shutdown_net(net);
- pr_info("nfsd: last server has exited, flushing export cache\n");
nfsd_export_flush(net);
}
@@ -713,14 +712,13 @@ int nfsd_nrpools(struct net *net)
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
- int i = 0;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv = nn->nfsd_serv;
+ int i;
- if (nn->nfsd_serv != NULL) {
- for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
- nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
- }
-
+ if (serv)
+ for (i = 0; i < serv->sv_nrpools && i < n; i++)
+ nthreads[i] = atomic_read(&serv->sv_pools[i].sp_nrthreads);
return 0;
}
@@ -787,7 +785,6 @@ int
nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
{
int error;
- bool nfsd_up_before;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
@@ -807,8 +804,6 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
error = nfsd_create_serv(net);
if (error)
goto out;
-
- nfsd_up_before = nn->nfsd_net_up;
serv = nn->nfsd_serv;
error = nfsd_startup_net(net, cred);
@@ -816,17 +811,15 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
goto out_put;
error = svc_set_num_threads(serv, NULL, nrservs);
if (error)
- goto out_shutdown;
+ goto out_put;
error = serv->sv_nrthreads;
- if (error == 0)
- nfsd_last_thread(net);
-out_shutdown:
- if (error < 0 && !nfsd_up_before)
- nfsd_shutdown_net(net);
out_put:
/* Threads now hold service active */
if (xchg(&nn->keep_active, 0))
svc_put(serv);
+
+ if (serv->sv_nrthreads == 0)
+ nfsd_last_thread(net);
svc_put(serv);
out:
mutex_unlock(&nfsd_mutex);
@@ -957,7 +950,7 @@ nfsd(void *vrqstp)
/*
* The main request loop
*/
- while (!kthread_should_stop()) {
+ while (!svc_thread_should_stop(rqstp)) {
/* Update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nn->max_connections;
@@ -998,6 +991,15 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
+	/*
+	 * Set rq_status_counter to an odd value with release semantics
+	 * now that the RPC request has been parsed. Consumers treat the
+	 * rqstp fields as stable while the counter is odd and as not
+	 * meaningful while it is even.
+	 */
+ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter | 1);
+
rp = NULL;
switch (nfsd_cache_lookup(rqstp, &rp)) {
case RC_DOIT:
@@ -1015,6 +1017,12 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
+	/*
+	 * Bump rq_status_counter back to an even value with release
+	 * semantics now that the request has been fully processed.
+	 */
+ smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter + 1);
+
nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
out_cached_reply:
return 1;
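
[Note: the two release stores above pair with the acquire loads in the rpc_status dumpit earlier in this patch, forming a seqcount-style publication protocol: odd means "parsed and stable", even means "recycled". A self-contained userspace model of the pairing, with C11 atomics standing in for smp_store_release()/smp_load_acquire() and illustrative names:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct rqst_model {
		atomic_uint status_counter;
		int xid, prog, proc;		/* published fields */
	};

	/* Call once the published fields have been written. */
	static void mark_parsed(struct rqst_model *r)
	{
		unsigned int c = atomic_load_explicit(&r->status_counter,
						      memory_order_relaxed);
		atomic_store_explicit(&r->status_counter, c | 1,
				      memory_order_release);	/* odd */
	}

	static void mark_done(struct rqst_model *r)
	{
		unsigned int c = atomic_load_explicit(&r->status_counter,
						      memory_order_relaxed);
		atomic_store_explicit(&r->status_counter, c + 1,
				      memory_order_release);	/* even */
	}

	/* Consumer: snapshot the fields only if they stayed stable. */
	static bool snapshot(struct rqst_model *r, int *xid, int *prog, int *proc)
	{
		unsigned int c = atomic_load_explicit(&r->status_counter,
						      memory_order_acquire);
		if (!(c & 1))
			return false;	/* not parsed yet */
		*xid = r->xid; *prog = r->prog; *proc = r->proc;
		return atomic_load_explicit(&r->status_counter,
					    memory_order_acquire) == c;
	}
]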
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 4f4282d4eeca..de1e0dfed06a 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -27,12 +27,12 @@ struct nfsd4_layout_ops {
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdevp);
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
- struct nfsd4_getdeviceinfo *gdevp);
+ const struct nfsd4_getdeviceinfo *gdevp);
__be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
struct nfsd4_layoutget *lgp);
- __be32 (*encode_layoutget)(struct xdr_stream *,
- struct nfsd4_layoutget *lgp);
+ __be32 (*encode_layoutget)(struct xdr_stream *xdr,
+ const struct nfsd4_layoutget *lgp);
__be32 (*proc_layoutcommit)(struct inode *inode,
struct nfsd4_layoutcommit *lcp);
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index cbddcf484dba..f96eaa8e9413 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -117,6 +117,24 @@ struct nfs4_cpntf_state {
time64_t cpntf_time; /* last time stateid used */
};
+struct nfs4_cb_fattr {
+ struct nfsd4_callback ncf_getattr;
+ u32 ncf_cb_status;
+ u32 ncf_cb_bmap[1];
+
+ /* from CB_GETATTR reply */
+ u64 ncf_cb_change;
+ u64 ncf_cb_fsize;
+
+ unsigned long ncf_cb_flags;
+ bool ncf_file_modified;
+ u64 ncf_initial_cinfo;
+ u64 ncf_cur_fsize;
+};
+
+/* bits for ncf_cb_flags */
+#define CB_GETATTR_BUSY 0
+
/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
@@ -150,6 +168,9 @@ struct nfs4_delegation {
int dl_retries;
struct nfsd4_callback dl_recall;
bool dl_recalled;
+
+ /* for CB_GETATTR */
+ struct nfs4_cb_fattr dl_cb_fattr;
};
#define cb_to_delegation(cb) \
@@ -174,8 +195,6 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
/* Maximum number of slots per session. 160 is useful for long haul TCP */
#define NFSD_MAX_SLOTS_PER_SESSION 160
-/* Maximum number of operations per session compound */
-#define NFSD_MAX_OPS_PER_COMPOUND 50
/* Maximum session per slot cache size */
#define NFSD_SLOT_CACHE_SIZE 2048
/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
@@ -642,6 +661,7 @@ enum nfsd4_cb_op {
NFSPROC4_CLNT_CB_SEQUENCE,
NFSPROC4_CLNT_CB_NOTIFY_LOCK,
NFSPROC4_CLNT_CB_RECALL_ANY,
+ NFSPROC4_CLNT_CB_GETATTR,
};
/* Returns true iff a is later than b: */
@@ -734,5 +754,6 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
}
extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
- struct inode *inode);
+ struct inode *inode, bool *file_modified, u64 *size);
+extern void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 63797635e1c3..12d79f5d4eb1 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -60,7 +60,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
#ifdef CONFIG_NFSD_V4
/* Show count for individual nfsv4 operations */
/* Writing operation numbers 0 1 2 also for maintaining uniformity */
- seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
+ seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1);
for (i = 0; i <= LAST_NFS4_OP; i++) {
seq_printf(seq, " %lld",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
@@ -76,7 +76,7 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
+int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
{
int i, err = 0;
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index cf5524e7ca06..14f50c660b61 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -37,9 +37,9 @@ extern struct nfsd_stats nfsdstats;
extern struct svc_stat nfsd_svcstats;
-int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
+int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
+void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
+void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
int nfsd_stat_init(void);
void nfsd_stat_shutdown(void);
@@ -61,22 +61,22 @@ static inline void nfsd_stats_rc_nocache_inc(void)
static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
{
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
- if (exp)
- percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
+ if (exp && exp->ex_stats)
+ percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]);
}
static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
{
percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
- if (exp)
- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount);
}
static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
{
percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
- if (exp)
- percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
+ if (exp && exp->ex_stats)
+ percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount);
}
static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 803904348871..fbc0ccb40424 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -1863,6 +1863,93 @@ TRACE_EVENT(nfsd_end_grace,
)
);
+DECLARE_EVENT_CLASS(nfsd_copy_class,
+ TP_PROTO(
+ const struct nfsd4_copy *copy
+ ),
+ TP_ARGS(copy),
+ TP_STRUCT__entry(
+ __field(bool, intra)
+ __field(bool, async)
+ __field(u32, src_cl_boot)
+ __field(u32, src_cl_id)
+ __field(u32, src_so_id)
+ __field(u32, src_si_generation)
+ __field(u32, dst_cl_boot)
+ __field(u32, dst_cl_id)
+ __field(u32, dst_so_id)
+ __field(u32, dst_si_generation)
+ __field(u64, src_cp_pos)
+ __field(u64, dst_cp_pos)
+ __field(u64, cp_count)
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ const stateid_t *src_stp = &copy->cp_src_stateid;
+ const stateid_t *dst_stp = &copy->cp_dst_stateid;
+
+ __entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+ __entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ __entry->src_cl_boot = src_stp->si_opaque.so_clid.cl_boot;
+ __entry->src_cl_id = src_stp->si_opaque.so_clid.cl_id;
+ __entry->src_so_id = src_stp->si_opaque.so_id;
+ __entry->src_si_generation = src_stp->si_generation;
+ __entry->dst_cl_boot = dst_stp->si_opaque.so_clid.cl_boot;
+ __entry->dst_cl_id = dst_stp->si_opaque.so_clid.cl_id;
+ __entry->dst_so_id = dst_stp->si_opaque.so_id;
+ __entry->dst_si_generation = dst_stp->si_generation;
+ __entry->src_cp_pos = copy->cp_src_pos;
+ __entry->dst_cp_pos = copy->cp_dst_pos;
+ __entry->cp_count = copy->cp_count;
+ __assign_sockaddr(addr, &copy->cp_clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client=%pISpc intra=%d async=%d "
+ "src_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
+ "dst_stateid[si_generation:0x%x cl_boot:0x%x cl_id:0x%x so_id:0x%x] "
+ "cp_src_pos=%llu cp_dst_pos=%llu cp_count=%llu",
+ __get_sockaddr(addr), __entry->intra, __entry->async,
+ __entry->src_si_generation, __entry->src_cl_boot,
+ __entry->src_cl_id, __entry->src_so_id,
+ __entry->dst_si_generation, __entry->dst_cl_boot,
+ __entry->dst_cl_id, __entry->dst_so_id,
+ __entry->src_cp_pos, __entry->dst_cp_pos, __entry->cp_count
+ )
+);
+
+#define DEFINE_COPY_EVENT(name) \
+DEFINE_EVENT(nfsd_copy_class, nfsd_copy_##name, \
+ TP_PROTO(const struct nfsd4_copy *copy), \
+ TP_ARGS(copy))
+
+DEFINE_COPY_EVENT(inter);
+DEFINE_COPY_EVENT(intra);
+DEFINE_COPY_EVENT(do_async);
+
+TRACE_EVENT(nfsd_copy_done,
+ TP_PROTO(
+ const struct nfsd4_copy *copy,
+ __be32 status
+ ),
+ TP_ARGS(copy, status),
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(bool, intra)
+ __field(bool, async)
+ __sockaddr(addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->status = be32_to_cpu(status);
+ __entry->intra = test_bit(NFSD4_COPY_F_INTRA, &copy->cp_flags);
+ __entry->async = !test_bit(NFSD4_COPY_F_SYNCHRONOUS, &copy->cp_flags);
+ __assign_sockaddr(addr, &copy->cp_clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc status=%d intra=%d async=%d ",
+ __get_sockaddr(addr), __entry->status, __entry->intra, __entry->async
+ )
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 48260cf68fde..fbbea7498f02 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -337,6 +337,24 @@ out:
return err;
}
+static void
+commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
+ int err)
+{
+ switch (err) {
+ case -EAGAIN:
+ case -ESTALE:
+ /*
+ * Neither of these are the result of a problem with
+ * durable storage, so avoid a write verifier reset.
+ */
+ break;
+ default:
+ nfsd_reset_write_verifier(nn);
+ trace_nfsd_writeverf_reset(nn, rqstp, err);
+ }
+}
+
/*
* Commit metadata changes to stable storage.
*/
@@ -520,7 +538,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
nfsd_sanitize_attrs(inode, iap);
- if (check_guard && guardtime != inode_get_ctime(inode).tv_sec)
+ if (check_guard && guardtime != inode_get_ctime_sec(inode))
return nfserr_notsync;
/*
@@ -647,8 +665,7 @@ __be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
&nfsd4_get_cstate(rqstp)->current_fh,
dst_pos,
count, status);
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, status);
+ commit_reset_write_verifier(nn, rqstp, status);
ret = nfserrno(status);
}
}
@@ -823,7 +840,7 @@ int nfsd_open_break_lease(struct inode *inode, int access)
* and additional flags.
* N.B. After this call fhp needs an fh_put
*/
-static __be32
+static int
__nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
@@ -831,14 +848,12 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
struct inode *inode;
struct file *file;
int flags = O_RDONLY|O_LARGEFILE;
- __be32 err;
- int host_err = 0;
+ int host_err = -EPERM;
path.mnt = fhp->fh_export->ex_path.mnt;
path.dentry = fhp->fh_dentry;
inode = d_inode(path.dentry);
- err = nfserr_perm;
if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
goto out;
@@ -847,7 +862,7 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
host_err = nfsd_open_break_lease(inode, may_flags);
if (host_err) /* NOMEM or WOULDBLOCK */
- goto out_nfserr;
+ goto out;
if (may_flags & NFSD_MAY_WRITE) {
if (may_flags & NFSD_MAY_READ)
@@ -859,13 +874,13 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
file = dentry_open(&path, flags, current_cred());
if (IS_ERR(file)) {
host_err = PTR_ERR(file);
- goto out_nfserr;
+ goto out;
}
host_err = ima_file_check(file, may_flags);
if (host_err) {
fput(file);
- goto out_nfserr;
+ goto out;
}
if (may_flags & NFSD_MAY_64BIT_COOKIE)
@@ -874,10 +889,8 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
file->f_mode |= FMODE_32BITHASH;
*filp = file;
-out_nfserr:
- err = nfserrno(host_err);
out:
- return err;
+ return host_err;
}
__be32
@@ -885,6 +898,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
__be32 err;
+ int host_err;
bool retried = false;
validate_process_creds();
@@ -904,12 +918,13 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
retry:
err = fh_verify(rqstp, fhp, type, may_flags);
if (!err) {
- err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
- if (err == nfserr_stale && !retried) {
+ host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+ if (host_err == -EOPENSTALE && !retried) {
retried = true;
fh_put(fhp);
goto retry;
}
+ err = nfserrno(host_err);
}
validate_process_creds();
return err;
@@ -922,13 +937,13 @@ retry:
* @may_flags: internal permission flags
* @filp: OUT: open "struct file *"
*
- * Returns an nfsstat value in network byte order.
+ * Returns zero on success, or a negative errno value.
*/
-__be32
+int
nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
struct file **filp)
{
- __be32 err;
+ int err;
validate_process_creds();
err = __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
@@ -1172,8 +1187,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
host_err = vfs_iter_write(file, &iter, &pos, flags);
file_end_write(file);
if (host_err < 0) {
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, host_err);
+ commit_reset_write_verifier(nn, rqstp, host_err);
goto out_nfserr;
}
*cnt = host_err;
@@ -1185,10 +1199,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
if (stable && use_wgather) {
host_err = wait_for_concurrent_writes(file);
- if (host_err < 0) {
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, host_err);
- }
+ if (host_err < 0)
+ commit_reset_write_verifier(nn, rqstp, host_err);
}
out_nfserr:
@@ -1331,8 +1343,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
err = nfserr_notsupp;
break;
default:
- nfsd_reset_write_verifier(nn);
- trace_nfsd_writeverf_reset(nn, rqstp, err2);
+ commit_reset_write_verifier(nn, rqstp, err2);
err = nfserrno(err2);
}
} else
@@ -1788,6 +1799,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
+ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+ goto out;
+ if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+ goto out;
+
retry:
host_err = fh_want_write(ffhp);
if (host_err) {
@@ -1823,12 +1840,6 @@ retry:
if (ndentry == trap)
goto out_dput_new;
- host_err = -EXDEV;
- if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
- goto out_dput_new;
- if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
- goto out_dput_new;
-
if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
nfsd_has_cached_files(ndentry)) {
close_cached = true;
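
[Note: hoisting the two export checks above the retry loop means a cross-device rename now fails with nfserr_xdev (nfserr_acces for NFSv2, which defines no XDEV error) before fh_want_write() and lock_rename() are taken, rather than being detected only after the target dentry lookup under locks.]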
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index a6890ea7b765..e3c29596f4df 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -104,8 +104,8 @@ __be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
int nfsd_open_break_lease(struct inode *, int);
__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t,
int, struct file **);
-__be32 nfsd_open_verified(struct svc_rqst *, struct svc_fh *,
- int, struct file **);
+int nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ int may_flags, struct file **filp);
__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
struct file *file, loff_t offset,
unsigned long *count,
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 9d918a79dc16..80e859dc84d8 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -50,6 +50,134 @@
#define HAS_CSTATE_FLAG(c, f) ((c)->sid_flags & (f))
#define CLEAR_CSTATE_FLAG(c, f) ((c)->sid_flags &= ~(f))
+/**
+ * nfsd4_encode_bool - Encode an XDR bool type result
+ * @xdr: target XDR stream
+ * @val: boolean value to encode
+ *
+ * Return values:
+ * %nfs_ok: @val encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_bool(struct xdr_stream *xdr, bool val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ *p = val ? xdr_one : xdr_zero;
+ return nfs_ok;
+}
+
+/**
+ * nfsd4_encode_uint32_t - Encode an XDR uint32_t type result
+ * @xdr: target XDR stream
+ * @val: integer value to encode
+ *
+ * Return values:
+ * %nfs_ok: @val encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_uint32_t(struct xdr_stream *xdr, u32 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ *p = cpu_to_be32(val);
+ return nfs_ok;
+}
+
+#define nfsd4_encode_aceflag4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_acemask4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_acetype4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_count4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_mode4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_nfs_lease4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_qop4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_sequenceid4(x, v) nfsd4_encode_uint32_t(x, v)
+#define nfsd4_encode_slotid4(x, v) nfsd4_encode_uint32_t(x, v)
+
+/**
+ * nfsd4_encode_uint64_t - Encode an XDR uint64_t type result
+ * @xdr: target XDR stream
+ * @val: integer value to encode
+ *
+ * Return values:
+ * %nfs_ok: @val encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_uint64_t(struct xdr_stream *xdr, u64 val)
+{
+ __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ put_unaligned_be64(val, p);
+ return nfs_ok;
+}
+
+#define nfsd4_encode_changeid4(x, v) nfsd4_encode_uint64_t(x, v)
+#define nfsd4_encode_nfs_cookie4(x, v) nfsd4_encode_uint64_t(x, v)
+#define nfsd4_encode_length4(x, v) nfsd4_encode_uint64_t(x, v)
+#define nfsd4_encode_offset4(x, v) nfsd4_encode_uint64_t(x, v)
+
+/**
+ * nfsd4_encode_opaque_fixed - Encode a fixed-length XDR opaque type result
+ * @xdr: target XDR stream
+ * @data: pointer to data
+ * @size: length of data in bytes
+ *
+ * Return values:
+ * %nfs_ok: @data encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_opaque_fixed(struct xdr_stream *xdr, const void *data,
+ size_t size)
+{
+ __be32 *p = xdr_reserve_space(xdr, xdr_align_size(size));
+ size_t pad = xdr_pad_size(size);
+
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ memcpy(p, data, size);
+ if (pad)
+ memset((char *)p + size, 0, pad);
+ return nfs_ok;
+}
+
+/**
+ * nfsd4_encode_opaque - Encode a variable-length XDR opaque type result
+ * @xdr: target XDR stream
+ * @data: pointer to data
+ * @size: length of data in bytes
+ *
+ * Return values:
+ * %nfs_ok: @data encoded; @xdr advanced to next position
+ * %nfserr_resource: stream buffer space exhausted
+ */
+static __always_inline __be32
+nfsd4_encode_opaque(struct xdr_stream *xdr, const void *data, size_t size)
+{
+ size_t pad = xdr_pad_size(size);
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(size));
+ if (unlikely(p == NULL))
+ return nfserr_resource;
+ *p++ = cpu_to_be32(size);
+ memcpy(p, data, size);
+ if (pad)
+ memset((char *)p + size, 0, pad);
+ return nfs_ok;
+}
+
+#define nfsd4_encode_component4(x, d, s) nfsd4_encode_opaque(x, d, s)
+
struct nfsd4_compound_state {
struct svc_fh current_fh;
struct svc_fh save_fh;
@@ -170,12 +298,8 @@ struct nfsd4_lock {
} v;
/* response */
- union {
- struct {
- stateid_t stateid;
- } ok;
- struct nfsd4_lock_denied denied;
- } u;
+ stateid_t lk_resp_stateid;
+ struct nfsd4_lock_denied lk_denied;
};
#define lk_new_open_seqid v.new.open_seqid
#define lk_new_open_stateid v.new.open_stateid
@@ -185,20 +309,15 @@ struct nfsd4_lock {
#define lk_old_lock_stateid v.old.lock_stateid
#define lk_old_lock_seqid v.old.lock_seqid
-#define lk_resp_stateid u.ok.stateid
-#define lk_denied u.denied
-
-
struct nfsd4_lockt {
u32 lt_type;
clientid_t lt_clientid;
struct xdr_netobj lt_owner;
u64 lt_offset;
u64 lt_length;
- struct nfsd4_lock_denied lt_denied;
+ struct nfsd4_lock_denied lt_denied;
};
-
struct nfsd4_locku {
u32 lu_type;
u32 lu_seqid;
@@ -267,9 +386,9 @@ struct nfsd4_open {
u32 op_deleg_want; /* request */
stateid_t op_stateid; /* response */
__be32 op_xdr_error; /* see nfsd4_open_omfg() */
- u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */
u32 op_rflags; /* response */
+ bool op_recall; /* response */
bool op_truncate; /* used during processing */
bool op_created; /* used during processing */
struct nfs4_openowner *op_openowner; /* used during processing */
@@ -496,7 +615,7 @@ struct nfsd4_layoutcommit {
u32 lc_layout_type; /* request */
u32 lc_up_len; /* layout length */
void *lc_up_layout; /* decoded by callback */
- u32 lc_size_chg; /* boolean for response */
+ bool lc_size_chg; /* response */
u64 lc_newsize; /* response */
};
@@ -508,7 +627,7 @@ struct nfsd4_layoutreturn {
u32 lrf_body_len; /* request */
void *lrf_body; /* request */
stateid_t lr_sid; /* request/response */
- u32 lrs_present; /* response */
+ bool lrs_present; /* response */
};
struct nfsd4_fallocate {
@@ -626,8 +745,7 @@ struct nfsd4_copy_notify {
/* response */
stateid_t cpn_cnr_stateid;
- u64 cpn_sec;
- u32 cpn_nsec;
+ struct timespec64 cpn_lease_time;
struct nl4_server *cpn_src;
};
@@ -820,8 +938,10 @@ extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, union nfsd4_op_u *u);
extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
union nfsd4_op_u *u);
+extern void nfsd4_lock_release(union nfsd4_op_u *u);
extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
union nfsd4_op_u *u);
+extern void nfsd4_lockt_release(union nfsd4_op_u *u);
extern __be32 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
union nfsd4_op_u *u);
extern __be32
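The inline helpers added above all share one contract: reserve stream space, write the value, and return nfs_ok or nfserr_resource. A minimal sketch of how an op encoder might compose them (the function and field values here are hypothetical, not part of the patch):

/* Hypothetical result encoder composed from the helpers above. */
static __be32
nfsd4_encode_example_res(struct xdr_stream *xdr)
{
	__be32 status;

	/* Each helper fails fast with nfserr_resource on a short buffer. */
	status = nfsd4_encode_bool(xdr, true);
	if (status != nfs_ok)
		return status;
	status = nfsd4_encode_count4(xdr, 16);	/* a uint32_t alias */
	if (status != nfs_ok)
		return status;
	return nfsd4_encode_offset4(xdr, 4096);	/* a uint64_t alias */
}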
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h
index 0d39af1b00a0..e8b00309c449 100644
--- a/fs/nfsd/xdr4cb.h
+++ b/fs/nfsd/xdr4cb.h
@@ -54,3 +54,21 @@
#define NFS4_dec_cb_recall_any_sz (cb_compound_dec_hdr_sz + \
cb_sequence_dec_sz + \
op_dec_sz)
+
+/*
+ * 1: CB_GETATTR opcode (32-bit)
+ * N: file_handle
+ * 1: number of entries in attribute array (32-bit)
+ * 1: entry 0 in attribute array (32-bit)
+ */
+#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
+ 1 + enc_nfs4_fh_sz + 1 + 1)
+/*
+ * 4: fattr_bitmap_maxsz
+ * 1: attribute array len
+ * 2: change attr (64-bit)
+ * 2: size (64-bit)
+ */
+#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz)
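The *_sz macros count 4-byte XDR words, not bytes. A small, hedged sanity check of the CB_GETATTR encode size (the header terms are omitted and enc_nfs4_fh_sz is an assumed value mirroring its usual definition):

#include <stdio.h>

#define XDR_UNIT	4	/* bytes per XDR word */
#define ENC_NFS4_FH_SZ	(1 + 32)	/* assumed: length word + 128-byte handle */

int main(void)
{
	/* opcode + filehandle + bitmap length + one bitmap word, as in
	 * NFS4_enc_cb_getattr_sz (compound/sequence headers left out). */
	unsigned int words = 1 + ENC_NFS4_FH_SZ + 1 + 1;

	printf("CB_GETATTR op body: %u words = %u bytes\n",
	       words, words * XDR_UNIT);
	return 0;
}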
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index bce734b68f08..de2073c47651 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -429,7 +429,7 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, mapping, from, to);
nilfs_put_page(page);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
/*
@@ -519,7 +519,7 @@ got_it:
de->inode = cpu_to_le64(inode->i_ino);
nilfs_set_de_type(de, inode);
nilfs_commit_chunk(page, page->mapping, from, to);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
nilfs_mark_inode_dirty(dir);
/* OFFSET_CACHE */
out_put:
@@ -567,7 +567,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
pde->rec_len = nilfs_rec_len_to_disk(to - from);
dir->inode = 0;
nilfs_commit_chunk(page, mapping, from, to);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
out:
nilfs_put_page(page);
return err;
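Every hunk in this file applies the same conversion: direct stores to dir->i_mtime become accessor calls, reusing the timespec64 that inode_set_ctime_current() returns. A minimal sketch of the pattern (the helper name is illustrative only):

/* Illustrative helper: bump ctime and mirror it into mtime. */
static void example_touch_dir(struct inode *dir)
{
	/* inode_set_ctime_current() returns the timestamp it just set,
	 * so mtime can be updated to the identical value in one call. */
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	mark_inode_dirty(dir);
}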
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 1a8bd5993476..f861f3a0bf5c 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -366,7 +366,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
atomic64_inc(&root->inodes_count);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = ino;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
err = nilfs_bmap_read(ii->i_bmap, NULL);
@@ -449,12 +449,12 @@ int nilfs_read_inode_common(struct inode *inode,
i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
inode->i_size = le64_to_cpu(raw_inode->i_size);
- inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
+ inode_set_atime(inode, le64_to_cpu(raw_inode->i_mtime),
+ le32_to_cpu(raw_inode->i_mtime_nsec));
inode_set_ctime(inode, le64_to_cpu(raw_inode->i_ctime),
le32_to_cpu(raw_inode->i_ctime_nsec));
- inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
- inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
+ inode_set_mtime(inode, le64_to_cpu(raw_inode->i_mtime),
+ le32_to_cpu(raw_inode->i_mtime_nsec));
if (nilfs_is_metadata_file_inode(inode) && !S_ISREG(inode->i_mode))
return -EIO; /* this inode is for metadata and corrupted */
if (inode->i_nlink == 0)
@@ -768,10 +768,10 @@ void nilfs_write_inode_common(struct inode *inode,
raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le64(inode->i_size);
- raw_inode->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ raw_inode->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ raw_inode->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ raw_inode->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ raw_inode->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
raw_inode->i_flags = cpu_to_le32(ii->i_flags);
@@ -875,7 +875,7 @@ void nilfs_truncate(struct inode *inode)
nilfs_truncate_bmap(ii, blkoff);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index ebdcc25df0f7..869b016014d2 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -265,7 +265,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
- struct file *f;
+ struct file *f = NULL;
int destroy = 0, error = 0;
__u32 mask;
@@ -345,7 +345,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg)
}
rcu_read_lock();
- f = lookup_fd_rcu(fd);
+ f = lookup_fdget_rcu(fd);
rcu_read_unlock();
/* if (f != filp) means that we lost a race and another task/thread
@@ -392,6 +392,8 @@ out_err:
fsnotify_put_mark(new_fsn_mark);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
+ if (f)
+ fput(f);
return error;
}
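The dnotify fix swaps lookup_fd_rcu() for lookup_fdget_rcu(), which takes a reference on the returned file; f therefore starts as NULL and is dropped on every exit path. A condensed sketch of the resulting pattern:

/* Sketch of the reference-counted fd re-check (names as in the hunk). */
static int example_recheck_fd(int fd, struct file *filp)
{
	struct file *f;
	int error = 0;

	rcu_read_lock();
	f = lookup_fdget_rcu(fd);	/* takes a reference, unlike lookup_fd_rcu() */
	rcu_read_unlock();

	if (f != filp)
		error = -EBADF;		/* lost a race with another task/thread */
	if (f)
		fput(f);		/* drop the reference on every path out */
	return error;
}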
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index f69c451018e3..62fe0b679e58 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1585,16 +1585,25 @@ static int fanotify_test_fsid(struct dentry *dentry, __kernel_fsid_t *fsid)
}
/* Check if filesystem can encode a unique fid */
-static int fanotify_test_fid(struct dentry *dentry)
+static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
{
+ unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+ const struct export_operations *nop = dentry->d_sb->s_export_op;
+
+ /*
+ * We need to make sure that the filesystem supports encoding of
+ * file handles so user can use name_to_handle_at() to compare fids
+ * reported with events to the file handle of watched objects.
+ */
+ if (!nop)
+ return -EOPNOTSUPP;
+
/*
- * We need to make sure that the file system supports at least
- * encoding a file handle so user can use name_to_handle_at() to
- * compare fid returned with event to the file handle of watched
- * objects. However, even the relaxed AT_HANDLE_FID flag requires
- * at least empty export_operations for ecoding unique file ids.
+ * For sb/mount mark, we also need to make sure that the filesystem
+ * supports decoding file handles, so user has a way to map back the
+ * reported fids to filesystem objects.
*/
- if (!dentry->d_sb->s_export_op)
+ if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
return -EOPNOTSUPP;
return 0;
@@ -1812,7 +1821,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (ret)
goto path_put_and_out;
- ret = fanotify_test_fid(path.dentry);
+ ret = fanotify_test_fid(path.dentry, flags);
if (ret)
goto path_put_and_out;
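The comments above describe the userspace side of this contract: fids reported in events are compared against name_to_handle_at() output. A hedged userspace sketch of that comparison (the event_fh buffer is assumed to come from a struct fanotify_event_info_fid; handle_type checking is elided):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>

/* Compare a watched path's file handle with a fid reported by fanotify. */
static int fid_matches(const char *path, const void *event_fh, size_t fh_len)
{
	struct file_handle *fh;
	int mount_id, match = 0;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == 0)
		match = fh->handle_bytes == fh_len &&
			memcmp(fh->f_handle, event_fh, fh_len) == 0;
	free(fh);
	return match;
}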
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 647a22433bd8..9a4b228d42fa 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -84,7 +84,7 @@ slow:
return -ENOMEM;
}
inode->i_ino = ns->inum;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_flags |= S_IMMUTABLE;
inode->i_mode = S_IFREG | S_IRUGO;
inode->i_fop = &ns_file_operations;
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 99ac6ea277c4..aba1e22db4e9 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -648,7 +648,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
* mtime is the last change of the data within the file. Not changed
* when only metadata is changed, e.g. a rename doesn't affect mtime.
*/
- vi->i_mtime = ntfs2utc(si->last_data_change_time);
+ inode_set_mtime_to_ts(vi, ntfs2utc(si->last_data_change_time));
/*
* ctime is the last change of the metadata of the file. This obviously
* always changes, when mtime is changed. ctime can be changed on its
@@ -659,7 +659,7 @@ static int ntfs_read_locked_inode(struct inode *vi)
* Last access to the data within the file. Not changed during a rename
* for example but changed whenever the file is written to.
*/
- vi->i_atime = ntfs2utc(si->last_access_time);
+ inode_set_atime_to_ts(vi, ntfs2utc(si->last_access_time));
/* Find the attribute list attribute if present. */
ntfs_attr_reinit_search_ctx(ctx);
@@ -1217,9 +1217,9 @@ static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi)
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
- vi->i_mtime = base_vi->i_mtime;
+ inode_set_mtime_to_ts(vi, inode_get_mtime(base_vi));
inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
- vi->i_atime = base_vi->i_atime;
+ inode_set_atime_to_ts(vi, inode_get_atime(base_vi));
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
@@ -1483,9 +1483,9 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
vi->i_uid = base_vi->i_uid;
vi->i_gid = base_vi->i_gid;
set_nlink(vi, base_vi->i_nlink);
- vi->i_mtime = base_vi->i_mtime;
+ inode_set_mtime_to_ts(vi, inode_get_mtime(base_vi));
inode_set_ctime_to_ts(vi, inode_get_ctime(base_vi));
- vi->i_atime = base_vi->i_atime;
+ inode_set_atime_to_ts(vi, inode_get_atime(base_vi));
vi->i_generation = ni->seq_no = base_ni->seq_no;
/* Set inode type to zero but preserve permissions. */
vi->i_mode = base_vi->i_mode & ~S_IFMT;
@@ -2805,13 +2805,14 @@ done:
if (!IS_NOCMTIME(VFS_I(base_ni)) && !IS_RDONLY(VFS_I(base_ni))) {
struct timespec64 now = current_time(VFS_I(base_ni));
struct timespec64 ctime = inode_get_ctime(VFS_I(base_ni));
+ struct timespec64 mtime = inode_get_mtime(VFS_I(base_ni));
int sync_it = 0;
- if (!timespec64_equal(&VFS_I(base_ni)->i_mtime, &now) ||
+ if (!timespec64_equal(&mtime, &now) ||
!timespec64_equal(&ctime, &now))
sync_it = 1;
inode_set_ctime_to_ts(VFS_I(base_ni), now);
- VFS_I(base_ni)->i_mtime = now;
+ inode_set_mtime_to_ts(VFS_I(base_ni), now);
if (sync_it)
mark_inode_dirty_sync(VFS_I(base_ni));
@@ -2925,9 +2926,9 @@ int ntfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
}
}
if (ia_valid & ATTR_ATIME)
- vi->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(vi, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
- vi->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(vi, attr->ia_mtime);
if (ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(vi, attr->ia_ctime);
mark_inode_dirty(vi);
@@ -2996,7 +2997,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si = (STANDARD_INFORMATION*)((u8*)ctx->attr +
le16_to_cpu(ctx->attr->data.resident.value_offset));
/* Update the access times if they have changed. */
- nt = utc2ntfs(vi->i_mtime);
+ nt = utc2ntfs(inode_get_mtime(vi));
if (si->last_data_change_time != nt) {
ntfs_debug("Updating mtime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino, (long long)
@@ -3014,7 +3015,7 @@ int __ntfs_write_inode(struct inode *vi, int sync)
si->last_mft_change_time = nt;
modified = true;
}
- nt = utc2ntfs(vi->i_atime);
+ nt = utc2ntfs(inode_get_atime(vi));
if (si->last_access_time != nt) {
ntfs_debug("Updating atime for inode 0x%lx: old = 0x%llx, "
"new = 0x%llx", vi->i_ino,
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index ad1a8f72da22..6fd1dc4b08c8 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -2682,7 +2682,7 @@ mft_rec_already_initialized:
vi->i_mode &= ~S_IWUGO;
/* Set the inode times to the current time. */
- vi->i_atime = vi->i_mtime = inode_set_ctime_current(vi);
+ simple_inode_init_ts(vi);
/*
* Set the file size to 0, the ntfs inode sizes are set to 0 by
* the call to ntfs_init_big_inode() below.
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index a9d82bbb4729..63f70259edc0 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -1106,10 +1106,10 @@ repack:
}
}
- /*
+ /*
* The code below may require additional cluster (to extend attribute list)
- * and / or one MFT record
- * It is too complex to undo operations if -ENOSPC occurs deep inside
+ * and / or one MFT record
+ * It is too complex to undo operations if -ENOSPC occurs deep inside
* in 'ni_insert_nonresident'.
* Return in advance -ENOSPC here if there are no free cluster and no free MFT.
*/
@@ -1736,10 +1736,8 @@ repack:
le_b = NULL;
attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
0, NULL, &mi_b);
- if (!attr_b) {
- err = -ENOENT;
- goto out;
- }
+ if (!attr_b)
+ return -ENOENT;
attr = attr_b;
le = le_b;
diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c
index 42631b31adf1..7c01735d1219 100644
--- a/fs/ntfs3/attrlist.c
+++ b/fs/ntfs3/attrlist.c
@@ -52,7 +52,8 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (!attr->non_res) {
lsize = le32_to_cpu(attr->res.data_size);
- le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ /* attr is resident: lsize < record_size (1K or 4K) */
+ le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
if (!le) {
err = -ENOMEM;
goto out;
@@ -80,7 +81,17 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
if (err < 0)
goto out;
- le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+ /*
+ * attr is nonresident.
+ * The worst case: a 1T (2^40), extremely fragmented file:
+ * cluster = 4K (2^12) => 2^28 fragments,
+ * 2^9 fragments per record => 2^19 records,
+ * 2^5 bytes of ATTR_LIST_ENTRY per record => 2^24 bytes,
+ * i.e. 16M bytes per attribute list.
+ * Use kvmalloc to allocate in the range
+ * [several KB .. dozens of MB].
+ */
+ le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
if (!le) {
err = -ENOMEM;
goto out;
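The worst-case figure in that comment is easy to verify; a standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long frags   = (1ULL << 40) / (1ULL << 12); /* 2^28 fragments */
	unsigned long long records = frags >> 9;   /* 2^9 fragments per record */
	unsigned long long bytes   = records << 5; /* 2^5 bytes per entry */

	printf("%llu bytes (= %lluM) per attribute list\n", bytes, bytes >> 20);
	return 0;
}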
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 107e808e06ea..63f14a0232f6 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -125,6 +125,7 @@ void wnd_close(struct wnd_bitmap *wnd)
struct rb_node *node, *next;
kfree(wnd->free_bits);
+ wnd->free_bits = NULL;
run_close(&wnd->run);
node = rb_first(&wnd->start_tree);
@@ -659,7 +660,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
wnd->bits_last = wbits;
wnd->free_bits =
- kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+ kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
+
if (!wnd->free_bits)
return -ENOMEM;
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 063a6654199b..ec0566b322d5 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -309,7 +309,11 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
return 0;
}
- dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+ /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
+ if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
+ dt_type = DT_LNK;
+ else
+ dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
}
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 962f12ce6c0a..ad4a70b5d432 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -342,7 +342,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
err = 0;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
if (IS_SYNC(inode)) {
@@ -400,7 +400,7 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
ni_unlock(ni);
ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (!IS_DIRSYNC(inode)) {
dirty = 1;
} else {
@@ -642,7 +642,7 @@ out:
filemap_invalidate_unlock(mapping);
if (!err) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
}
@@ -745,8 +745,8 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
}
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
- struct pipe_inode_info *pipe,
- size_t len, unsigned int flags)
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
{
struct inode *inode = in->f_mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 2b85cb10f0be..3df2d9e34b91 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -2148,7 +2148,7 @@ out1:
for (i = 0; i < pages_per_frame; i++) {
pg = pages[i];
- if (i == idx)
+ if (i == idx || !pg)
continue;
unlock_page(pg);
put_page(pg);
@@ -3208,6 +3208,12 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
continue;
+ /* Check simple case when parent inode equals current inode. */
+ if (ino_get(&fname->home) == ni->vfs_inode.i_ino) {
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ continue;
+ }
+
/* ntfs_iget5 may sleep. */
dir = ntfs_iget5(sb, &fname->home, NULL);
if (IS_ERR(dir)) {
@@ -3265,7 +3271,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
if (is_rec_inuse(ni->mi.mrec) &&
!(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) {
bool modified = false;
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts;
/* Update times in standard attribute. */
std = ni_std(ni);
@@ -3275,19 +3281,22 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint)
}
/* Update the access times if they have changed. */
- dup.m_time = kernel2nt(&inode->i_mtime);
+ ts = inode_get_mtime(inode);
+ dup.m_time = kernel2nt(&ts);
if (std->m_time != dup.m_time) {
std->m_time = dup.m_time;
modified = true;
}
- dup.c_time = kernel2nt(&ctime);
+ ts = inode_get_ctime(inode);
+ dup.c_time = kernel2nt(&ts);
if (std->c_time != dup.c_time) {
std->c_time = dup.c_time;
modified = true;
}
- dup.a_time = kernel2nt(&inode->i_atime);
+ ts = inode_get_atime(inode);
+ dup.a_time = kernel2nt(&ts);
if (std->a_time != dup.a_time) {
std->a_time = dup.a_time;
modified = true;
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 12f28cdf5c83..98ccb6650858 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -2168,8 +2168,10 @@ file_is_valid:
if (!page) {
page = kmalloc(log->page_size, GFP_NOFS);
- if (!page)
- return -ENOMEM;
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
}
/*
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 33afee0f5559..fbfe21dbb425 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -983,18 +983,11 @@ out:
if (err)
return err;
- mark_inode_dirty(&ni->vfs_inode);
+ mark_inode_dirty_sync(&ni->vfs_inode);
/* verify(!ntfs_update_mftmirr()); */
- /*
- * If we used wait=1, sync_inode_metadata waits for the io for the
- * inode to finish. It hangs when media is removed.
- * So wait=0 is sent down to sync_inode_metadata
- * and filemap_fdatawrite is used for the data blocks.
- */
- err = sync_inode_metadata(&ni->vfs_inode, 0);
- if (!err)
- err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
+ /* Write MFT record to disk. */
+ err = _ni_write_inode(&ni->vfs_inode, 1);
return err;
}
@@ -2461,10 +2454,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
{
CLST end, i, zone_len, zlen;
struct wnd_bitmap *wnd = &sbi->used.bitmap;
+ bool dirty = false;
down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
if (!wnd_is_used(wnd, lcn, len)) {
- ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+ /* Mark volume as dirty outside of wnd->rw_lock. */
+ dirty = true;
end = lcn + len;
len = 0;
@@ -2518,6 +2513,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
out:
up_write(&wnd->rw_lock);
+ if (dirty)
+ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}
/*
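The mark_as_free_ex() change is a lock-ordering fix: the dirty condition is only recorded while the bitmap lock is held, and acted on after it is released. The shape of the fix, as a reduced sketch:

/* Reduced sketch of deferring a side effect until after a lock is dropped. */
static void example_mark_free(struct ntfs_sb_info *sbi, struct wnd_bitmap *wnd,
			      CLST lcn, CLST len)
{
	bool dirty = false;

	down_write(&wnd->rw_lock);
	if (!wnd_is_used(wnd, lcn, len))
		dirty = true;	/* only record the fact under the lock */
	/* ... bitmap updates ... */
	up_write(&wnd->rw_lock);

	if (dirty)	/* take other locks only after rw_lock is released */
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
}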
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 124c6e822623..cf92b2433f7a 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
u32 total = le32_to_cpu(hdr->total);
u16 offs[128];
+ if (unlikely(!cmp))
+ return NULL;
+
fill_table:
if (end > total)
return NULL;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index eb2ed0701495..5e3d71374918 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -44,7 +44,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
u64 t64;
struct MFT_REC *rec;
struct runs_tree *run;
- struct timespec64 ctime;
+ struct timespec64 ts;
inode->i_op = NULL;
/* Setup 'uid' and 'gid' */
@@ -169,10 +169,12 @@ next_attr:
#ifdef STATX_BTIME
nt2kernel(std5->cr_time, &ni->i_crtime);
#endif
- nt2kernel(std5->a_time, &inode->i_atime);
- ctime = inode_get_ctime(inode);
- nt2kernel(std5->c_time, &ctime);
- nt2kernel(std5->m_time, &inode->i_mtime);
+ nt2kernel(std5->a_time, &ts);
+ inode_set_atime_to_ts(inode, ts);
+ nt2kernel(std5->c_time, &ts);
+ inode_set_ctime_to_ts(inode, ts);
+ nt2kernel(std5->m_time, &ts);
+ inode_set_mtime_to_ts(inode, ts);
ni->std_fa = std5->fa;
@@ -960,7 +962,8 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
if (err >= 0) {
if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
dirty = true;
}
@@ -1660,8 +1663,11 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
/* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */
- inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode, ni->i_crtime);
- dir->i_mtime = inode_set_ctime_to_ts(dir, ni->i_crtime);
+ inode_set_atime_to_ts(inode, ni->i_crtime);
+ inode_set_ctime_to_ts(inode, ni->i_crtime);
+ inode_set_mtime_to_ts(inode, ni->i_crtime);
+ inode_set_mtime_to_ts(dir, ni->i_crtime);
+ inode_set_ctime_to_ts(dir, ni->i_crtime);
mark_inode_dirty(dir);
mark_inode_dirty(inode);
@@ -1767,7 +1773,7 @@ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry)
if (!err) {
drop_nlink(inode);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
inode_set_ctime_to_ts(inode, inode_get_ctime(dir));
if (inode->i_nlink)
diff --git a/fs/ntfs3/namei.c b/fs/ntfs3/namei.c
index ad430d50bd79..ee3093be5170 100644
--- a/fs/ntfs3/namei.c
+++ b/fs/ntfs3/namei.c
@@ -156,8 +156,8 @@ static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
err = ntfs_link_inode(inode, de);
if (!err) {
- dir->i_mtime = inode_set_ctime_to_ts(inode,
- inode_set_ctime_current(dir));
+ inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(inode);
mark_inode_dirty(dir);
d_instantiate(de, inode);
@@ -373,7 +373,7 @@ static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
#ifdef CONFIG_NTFS3_FS_POSIX_ACL
if (IS_POSIXACL(dir)) {
- /*
+ /*
* Load in cache current acl to avoid ni_lock(dir):
* ntfs_create_inode -> ntfs_init_acl -> posix_acl_create ->
* ntfs_get_acl -> ntfs_get_acl_ex -> ni_lock
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 98b76d1b09e7..86aecbb01a92 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -847,7 +847,7 @@ struct OBJECT_ID {
// Birth Volume Id is the Object Id of the Volume on.
// which the Object Id was allocated. It never changes.
struct GUID BirthVolumeId; //0x10:
-
+
// Birth Object Id is the first Object Id that was
// ever assigned to this MFT Record. I.e. If the Object Id
// is changed for some reason, this field will reflect the
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 629403ede6e5..f6706143d14b 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -42,9 +42,11 @@ enum utf16_endian;
#define MINUS_ONE_T ((size_t)(-1))
/* Biggest MFT / smallest cluster */
#define MAXIMUM_BYTES_PER_MFT 4096
+#define MAXIMUM_SHIFT_BYTES_PER_MFT 12
#define NTFS_BLOCKS_PER_MFT_RECORD (MAXIMUM_BYTES_PER_MFT / 512)
#define MAXIMUM_BYTES_PER_INDEX 4096
+#define MAXIMUM_SHIFT_BYTES_PER_INDEX 12
#define NTFS_BLOCKS_PER_INODE (MAXIMUM_BYTES_PER_INDEX / 512)
/* NTFS specific error code when fixup failed. */
@@ -495,8 +497,6 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
struct kstat *stat, u32 request_mask, u32 flags);
int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr);
-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
- CLST len);
int ntfs_file_open(struct inode *inode, struct file *file);
int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
@@ -872,7 +872,7 @@ int ntfs_init_acl(struct mnt_idmap *idmap, struct inode *inode,
int ntfs_acl_chmod(struct mnt_idmap *idmap, struct dentry *dentry);
ssize_t ntfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern const struct xattr_handler *ntfs_xattr_handlers[];
+extern const struct xattr_handler * const ntfs_xattr_handlers[];
int ntfs_save_wsl_perm(struct inode *inode, __le16 *ea_size);
void ntfs_get_wsl_perm(struct inode *inode);
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index c12ebffc94da..53629b1f65e9 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -189,12 +189,19 @@ out:
return err;
}
+/*
+ * mi_enum_attr - Start/continue attribute enumeration in a record.
+ *
+ * NOTE: mi->mrec is memory of size sbi->record_size;
+ * here we rely on mi->mrec->total == sbi->record_size (see mi_read).
+ */
struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
{
const struct MFT_REC *rec = mi->mrec;
u32 used = le32_to_cpu(rec->used);
- u32 t32, off, asize;
+ u32 t32, off, asize, prev_type;
u16 t16;
+ u64 data_size, alloc_size, tot_size;
if (!attr) {
u32 total = le32_to_cpu(rec->total);
@@ -213,6 +220,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
if (!is_rec_inuse(rec))
return NULL;
+ prev_type = 0;
attr = Add2Ptr(rec, off);
} else {
/* Check if input attr inside record. */
@@ -226,11 +234,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return NULL;
}
- if (off + asize < off) {
- /* Overflow check. */
+ /* Overflow check. */
+ if (off + asize < off)
return NULL;
- }
+ prev_type = le32_to_cpu(attr->type);
attr = Add2Ptr(attr, asize);
off += asize;
}
@@ -250,7 +258,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
/* 0x100 is last known attribute for now. */
t32 = le32_to_cpu(attr->type);
- if ((t32 & 0xf) || (t32 > 0x100))
+ if (!t32 || (t32 & 0xf) || (t32 > 0x100))
+ return NULL;
+
+ /* Attributes in record must be ordered by type. */
+ if (t32 < prev_type)
return NULL;
/* Check overflow and boundary. */
@@ -259,16 +271,15 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
/* Check size of attribute. */
if (!attr->non_res) {
+ /* Check resident fields. */
if (asize < SIZEOF_RESIDENT)
return NULL;
t16 = le16_to_cpu(attr->res.data_off);
-
if (t16 > asize)
return NULL;
- t32 = le32_to_cpu(attr->res.data_size);
- if (t16 + t32 > asize)
+ if (t16 + le32_to_cpu(attr->res.data_size) > asize)
return NULL;
t32 = sizeof(short) * attr->name_len;
@@ -278,21 +289,52 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
return attr;
}
- /* Check some nonresident fields. */
- if (attr->name_len &&
- le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
- le16_to_cpu(attr->nres.run_off)) {
+ /* Check nonresident fields. */
+ if (attr->non_res != 1)
+ return NULL;
+
+ t16 = le16_to_cpu(attr->nres.run_off);
+ if (t16 > asize)
+ return NULL;
+
+ t32 = sizeof(short) * attr->name_len;
+ if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
+ return NULL;
+
+ /* Check start/end vcn. */
+ if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
return NULL;
- }
- if (attr->nres.svcn || !is_attr_ext(attr)) {
+ data_size = le64_to_cpu(attr->nres.data_size);
+ if (le64_to_cpu(attr->nres.valid_size) > data_size)
+ return NULL;
+
+ alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ if (data_size > alloc_size)
+ return NULL;
+
+ t32 = mi->sbi->cluster_mask;
+ if (alloc_size & t32)
+ return NULL;
+
+ if (!attr->nres.svcn && is_attr_ext(attr)) {
+ /* First segment of sparse/compressed attribute */
+ if (asize + 8 < SIZEOF_NONRESIDENT_EX)
+ return NULL;
+
+ tot_size = le64_to_cpu(attr->nres.total_size);
+ if (tot_size & t32)
+ return NULL;
+
+ if (tot_size > alloc_size)
+ return NULL;
+ } else {
if (asize + 8 < SIZEOF_NONRESIDENT)
return NULL;
if (attr->nres.c_unit)
return NULL;
- } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
- return NULL;
+ }
return attr;
}
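The new non-resident checks in mi_enum_attr() enforce a small set of size invariants. Restated as a standalone predicate (a hedged sketch; the real code additionally validates run_off, name placement, and the sparse/compressed total_size):

/* Sketch: core size invariants for a non-resident attribute. */
static bool example_nonres_sizes_sane(const struct ATTRIB *attr, u32 cluster_mask)
{
	u64 data_size  = le64_to_cpu(attr->nres.data_size);
	u64 alloc_size = le64_to_cpu(attr->nres.alloc_size);

	if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
		return false;	/* start VCN runs past the end VCN */
	if (le64_to_cpu(attr->nres.valid_size) > data_size)
		return false;	/* valid data cannot exceed data size */
	if (data_size > alloc_size)
		return false;	/* data cannot exceed the allocation */
	if (alloc_size & cluster_mask)
		return false;	/* allocation must be cluster-aligned */
	return true;
}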
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 5661a363005e..f763e3256ccc 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -453,15 +453,23 @@ static struct proc_dir_entry *proc_info_root;
* ntfs3.1
* cluster size
* number of clusters
+ * total number of mft records
+ * number of used mft records ~= number of files + folders
+ * real state of ntfs "dirty"/"clean"
+ * current state of ntfs "dirty"/"clean"
*/
static int ntfs3_volinfo(struct seq_file *m, void *o)
{
struct super_block *sb = m->private;
struct ntfs_sb_info *sbi = sb->s_fs_info;
- seq_printf(m, "ntfs%d.%d\n%u\n%zu\n", sbi->volume.major_ver,
- sbi->volume.minor_ver, sbi->cluster_size,
- sbi->used.bitmap.nbits);
+ seq_printf(m, "ntfs%d.%d\n%u\n%zu\n\%zu\n%zu\n%s\n%s\n",
+ sbi->volume.major_ver, sbi->volume.minor_ver,
+ sbi->cluster_size, sbi->used.bitmap.nbits,
+ sbi->mft.bitmap.nbits,
+ sbi->mft.bitmap.nbits - wnd_zeroes(&sbi->mft.bitmap),
+ sbi->volume.real_dirty ? "dirty" : "clean",
+ (sbi->volume.flags & VOLUME_FLAG_DIRTY) ? "dirty" : "clean");
return 0;
}
@@ -488,9 +496,13 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
{
int err;
struct super_block *sb = pde_data(file_inode(file));
- struct ntfs_sb_info *sbi = sb->s_fs_info;
ssize_t ret = count;
- u8 *label = kmalloc(count, GFP_NOFS);
+ u8 *label;
+
+ if (sb_rdonly(sb))
+ return -EROFS;
+
+ label = kmalloc(count, GFP_NOFS);
if (!label)
return -ENOMEM;
@@ -502,7 +514,7 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
while (ret > 0 && label[ret - 1] == '\n')
ret -= 1;
- err = ntfs_set_label(sbi, label, ret);
+ err = ntfs_set_label(sb->s_fs_info, label, ret);
if (err < 0) {
ntfs_err(sb, "failed (%d) to write label", err);
@@ -576,20 +588,30 @@ static noinline void ntfs3_put_sbi(struct ntfs_sb_info *sbi)
wnd_close(&sbi->mft.bitmap);
wnd_close(&sbi->used.bitmap);
- if (sbi->mft.ni)
+ if (sbi->mft.ni) {
iput(&sbi->mft.ni->vfs_inode);
+ sbi->mft.ni = NULL;
+ }
- if (sbi->security.ni)
+ if (sbi->security.ni) {
iput(&sbi->security.ni->vfs_inode);
+ sbi->security.ni = NULL;
+ }
- if (sbi->reparse.ni)
+ if (sbi->reparse.ni) {
iput(&sbi->reparse.ni->vfs_inode);
+ sbi->reparse.ni = NULL;
+ }
- if (sbi->objid.ni)
+ if (sbi->objid.ni) {
iput(&sbi->objid.ni->vfs_inode);
+ sbi->objid.ni = NULL;
+ }
- if (sbi->volume.ni)
+ if (sbi->volume.ni) {
iput(&sbi->volume.ni->vfs_inode);
+ sbi->volume.ni = NULL;
+ }
ntfs_update_mftmirr(sbi, 0);
@@ -836,7 +858,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
struct ntfs_sb_info *sbi = sb->s_fs_info;
int err;
u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
- u64 sectors, clusters, mlcn, mlcn2;
+ u64 sectors, clusters, mlcn, mlcn2, dev_size0;
struct NTFS_BOOT *boot;
struct buffer_head *bh;
struct MFT_REC *rec;
@@ -845,6 +867,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
u32 boot_off = 0;
const char *hint = "Primary boot";
+ /* Save original dev_size. Used with alternative boot. */
+ dev_size0 = dev_size;
+
sbi->volume.blocks = dev_size >> PAGE_SHIFT;
bh = ntfs_bread(sb, 0);
@@ -853,6 +878,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
check_boot:
err = -EINVAL;
+
+ /* Corrupted image; do not read OOB */
+ if (bh->b_size - sizeof(*boot) < boot_off)
+ goto out;
+
boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) {
@@ -899,9 +929,17 @@ check_boot:
goto out;
}
- sbi->record_size = record_size =
- boot->record_size < 0 ? 1 << (-boot->record_size) :
- (u32)boot->record_size << cluster_bits;
+ if (boot->record_size >= 0) {
+ record_size = (u32)boot->record_size << cluster_bits;
+ } else if (-boot->record_size <= MAXIMUM_SHIFT_BYTES_PER_MFT) {
+ record_size = 1u << (-boot->record_size);
+ } else {
+ ntfs_err(sb, "%s: invalid record size %d.", hint,
+ boot->record_size);
+ goto out;
+ }
+
+ sbi->record_size = record_size;
sbi->record_bits = blksize_bits(record_size);
sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
@@ -918,9 +956,15 @@ check_boot:
goto out;
}
- sbi->index_size = boot->index_size < 0 ?
- 1u << (-boot->index_size) :
- (u32)boot->index_size << cluster_bits;
+ if (boot->index_size >= 0) {
+ sbi->index_size = (u32)boot->index_size << cluster_bits;
+ } else if (-boot->index_size <= MAXIMUM_SHIFT_BYTES_PER_INDEX) {
+ sbi->index_size = 1u << (-boot->index_size);
+ } else {
+ ntfs_err(sb, "%s: invalid index size %d.", hint,
+ boot->index_size);
+ goto out;
+ }
/* Check index record size. */
if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
@@ -1055,17 +1099,17 @@ check_boot:
if (bh->b_blocknr && !sb_rdonly(sb)) {
/*
- * Alternative boot is ok but primary is not ok.
- * Do not update primary boot here 'cause it may be faked boot.
- * Let ntfs to be mounted and update boot later.
- */
+ * Alternative boot is ok but primary is not ok.
+ * Do not update primary boot here because it may be a faked boot.
+ * Let ntfs be mounted and update the boot record later.
+ */
*boot2 = kmemdup(boot, sizeof(*boot), GFP_NOFS | __GFP_NOWARN);
}
out:
- if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
+ if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
- u64 lbo = dev_size - sizeof(*boot);
+ u64 lbo = dev_size0 - sizeof(*boot);
/*
* Try alternative boot (last sector)
@@ -1079,6 +1123,7 @@ out:
boot_off = lbo & (block_size - 1);
hint = "Alternative boot";
+ dev_size = dev_size0; /* restore original size. */
goto check_boot;
}
brelse(bh);
@@ -1367,7 +1412,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
}
bytes = inode->i_size;
- sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
+ sbi->def_table = t = kvmalloc(bytes, GFP_KERNEL);
if (!t) {
err = -ENOMEM;
goto put_inode_out;
@@ -1521,9 +1566,9 @@ load_root:
if (boot2) {
/*
- * Alternative boot is ok but primary is not ok.
- * Volume is recognized as NTFS. Update primary boot.
- */
+ * Alternative boot is ok but primary is not ok.
+ * Volume is recognized as NTFS. Update primary boot.
+ */
struct buffer_head *bh0 = sb_getblk(sb, 0);
if (bh0) {
if (buffer_locked(bh0))
@@ -1564,6 +1609,7 @@ put_inode_out:
out:
ntfs3_put_sbi(sbi);
kfree(boot2);
+ ntfs3_put_sbi(sbi);
return err;
}
@@ -1757,7 +1803,6 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-
#ifdef CONFIG_PROC_FS
/* Create "/proc/fs/ntfs3" */
proc_info_root = proc_mkdir("fs/ntfs3", NULL);
@@ -1799,7 +1844,6 @@ static void __exit exit_ntfs_fs(void)
if (proc_info_root)
remove_proc_entry("fs/ntfs3", NULL);
#endif
-
}
MODULE_LICENSE("GPL");
diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
index 29fd391899e5..4274b6f31cfa 100644
--- a/fs/ntfs3/xattr.c
+++ b/fs/ntfs3/xattr.c
@@ -211,7 +211,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
size = le32_to_cpu(info->size);
/* Enumerate all xattrs. */
- for (ret = 0, off = 0; off < size; off += ea_size) {
+ ret = 0;
+ for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
ea = Add2Ptr(ea_all, off);
ea_size = unpacked_ea_size(ea);
@@ -219,6 +220,10 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
break;
if (buffer) {
+ /* Check that the ea->name field is fully within the buffer. */
+ if (off + ea_size > size)
+ break;
+
if (ret + ea->name_len + 1 > bytes_per_buffer) {
err = -ERANGE;
goto out;
@@ -1016,7 +1021,7 @@ static const struct xattr_handler ntfs_other_xattr_handler = {
.list = ntfs_xattr_user_list,
};
-const struct xattr_handler *ntfs_xattr_handlers[] = {
+const struct xattr_handler * const ntfs_xattr_handlers[] = {
&ntfs_other_xattr_handler,
NULL,
};
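The ntfs_list_ea() loop now checks bounds twice: once that a record header fits before reading it, and again that the full record (including ea->name) fits before using it. The generic shape of that iteration, sketched out:

/* Sketch: walking variable-length records with both bounds checks. */
static size_t example_walk_eas(const u8 *ea_all, u32 size)
{
	const struct EA_FULL *ea;
	u32 off, ea_size;
	size_t names = 0;

	for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
		ea = (const struct EA_FULL *)(ea_all + off); /* header in bounds */
		ea_size = unpacked_ea_size(ea);
		if (off + ea_size > size)
			break;		/* body would run past the buffer */
		names += ea->name_len + 1;
	}
	return names;
}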
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index e75137a8e7cb..62464d194da3 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -193,8 +193,8 @@ static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
inode->i_mode = new_mode;
inode_set_ctime_current(inode);
di->i_mode = cpu_to_le16(inode->i_mode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index aef58f1395c8..f0937902f7b4 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -7436,10 +7436,10 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 1);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0fdba30740ab..6ab03494fc6e 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2048,9 +2048,9 @@ out_write_size:
}
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
- inode->i_mtime = inode_set_ctime_current(inode);
- di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
- di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
+ di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
if (handle)
ocfs2_update_inode_fsync_trans(handle, inode, 1);
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 21472e3ed182..4d7efefa98c5 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -213,7 +213,7 @@ struct o2hb_region {
unsigned int hr_num_pages;
struct page **hr_slot_data;
- struct block_device *hr_bdev;
+ struct bdev_handle *hr_bdev_handle;
struct o2hb_disk_slot *hr_slots;
/* live node map of this region */
@@ -261,6 +261,11 @@ struct o2hb_region {
int hr_last_hb_status;
};
+static inline struct block_device *reg_bdev(struct o2hb_region *reg)
+{
+ return reg->hr_bdev_handle ? reg->hr_bdev_handle->bdev : NULL;
+}
+
struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;
struct completion wc_io_complete;
@@ -286,7 +291,7 @@ static void o2hb_write_timeout(struct work_struct *work)
hr_write_timeout_work.work);
mlog(ML_ERROR, "Heartbeat write timeout to device %pg after %u "
- "milliseconds\n", reg->hr_bdev,
+ "milliseconds\n", reg_bdev(reg),
jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
if (o2hb_global_heartbeat_active()) {
@@ -383,7 +388,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%pg).\n",
o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
set_bit(master_node, reg->hr_nego_node_bitmap);
}
if (!bitmap_equal(reg->hr_nego_node_bitmap, live_node_bitmap,
@@ -398,7 +403,8 @@ static void o2hb_nego_timeout(struct work_struct *work)
}
printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%pg) is down.\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item),
+ reg_bdev(reg));
/* approve negotiate timeout request. */
o2hb_arm_timeout(reg);
@@ -419,7 +425,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
/* negotiate timeout with master node. */
printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%pg), negotiate timeout with node %d.\n",
o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
- reg->hr_bdev, master_node);
+ reg_bdev(reg), master_node);
ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
master_node);
if (ret)
@@ -436,7 +442,8 @@ static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
nego_msg = (struct o2hb_nego_msg *)msg->buf;
printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%pg).\n",
- nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_bdev);
+ nego_msg->node_num, config_item_name(&reg->hr_item),
+ reg_bdev(reg));
if (nego_msg->node_num < O2NM_MAX_NODES)
set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
else
@@ -451,7 +458,7 @@ static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
struct o2hb_region *reg = data;
printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%pg).\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
o2hb_arm_timeout(reg);
return 0;
}
@@ -515,7 +522,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
* GFP_KERNEL that the local node can get fenced. It would be
* nicest if we could pre-allocate these bios and avoid this
* all together. */
- bio = bio_alloc(reg->hr_bdev, 16, opf, GFP_ATOMIC);
+ bio = bio_alloc(reg_bdev(reg), 16, opf, GFP_ATOMIC);
if (!bio) {
mlog(ML_ERROR, "Could not alloc slots BIO!\n");
bio = ERR_PTR(-ENOMEM);
@@ -687,7 +694,7 @@ static int o2hb_check_own_slot(struct o2hb_region *reg)
errstr = ERRSTR3;
mlog(ML_ERROR, "%s (%pg): expected(%u:0x%llx, 0x%llx), "
- "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_bdev,
+ "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg_bdev(reg),
slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
(unsigned long long)slot->ds_last_time, hb_block->hb_node,
(unsigned long long)le64_to_cpu(hb_block->hb_generation),
@@ -861,7 +868,7 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg)
goto unlock;
printk(KERN_NOTICE "o2hb: Region %s (%pg) is now a quorum device\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
@@ -920,7 +927,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
* consider it a transient miss but don't populate any
* other values as they may be junk. */
mlog(ML_ERROR, "Node %d has written a bad crc to %pg\n",
- slot->ds_node_num, reg->hr_bdev);
+ slot->ds_node_num, reg_bdev(reg));
o2hb_dump_slot(hb_block);
slot->ds_equal_samples++;
@@ -1003,8 +1010,8 @@ fire_callbacks:
"of %u ms, but our count is %u ms.\n"
"Please double check your configuration values "
"for 'O2CB_HEARTBEAT_THRESHOLD'\n",
- slot->ds_node_num, reg->hr_bdev, slot_dead_ms,
- dead_ms);
+ slot->ds_node_num, reg_bdev(reg),
+ slot_dead_ms, dead_ms);
}
goto out;
}
@@ -1143,7 +1150,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
* can't be sure that the new block ever made it to
* disk */
mlog(ML_ERROR, "Write error %d on device \"%pg\"\n",
- write_wc.wc_error, reg->hr_bdev);
+ write_wc.wc_error, reg_bdev(reg));
ret = write_wc.wc_error;
goto bail;
}
@@ -1169,7 +1176,7 @@ bail:
printk(KERN_NOTICE "o2hb: Unable to stabilize "
"heartbeat on region %s (%pg)\n",
config_item_name(&reg->hr_item),
- reg->hr_bdev);
+ reg_bdev(reg));
atomic_set(&reg->hr_steady_iterations, 0);
reg->hr_aborted_start = 1;
wake_up(&o2hb_steady_queue);
@@ -1489,7 +1496,7 @@ static void o2hb_region_release(struct config_item *item)
struct page *page;
struct o2hb_region *reg = to_o2hb_region(item);
- mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg->hr_bdev);
+ mlog(ML_HEARTBEAT, "hb region release (%pg)\n", reg_bdev(reg));
kfree(reg->hr_tmp_block);
@@ -1502,8 +1509,8 @@ static void o2hb_region_release(struct config_item *item)
kfree(reg->hr_slot_data);
}
- if (reg->hr_bdev)
- blkdev_put(reg->hr_bdev, NULL);
+ if (reg->hr_bdev_handle)
+ bdev_release(reg->hr_bdev_handle);
kfree(reg->hr_slots);
@@ -1562,7 +1569,7 @@ static ssize_t o2hb_region_block_bytes_store(struct config_item *item,
unsigned long block_bytes;
unsigned int block_bits;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
return -EINVAL;
status = o2hb_read_block_input(reg, page, &block_bytes,
@@ -1591,7 +1598,7 @@ static ssize_t o2hb_region_start_block_store(struct config_item *item,
char *p = (char *)page;
ssize_t ret;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
return -EINVAL;
ret = kstrtoull(p, 0, &tmp);
@@ -1616,7 +1623,7 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
unsigned long tmp;
char *p = (char *)page;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
return -EINVAL;
tmp = simple_strtoul(p, &p, 0);
@@ -1635,8 +1642,8 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page)
{
unsigned int ret = 0;
- if (to_o2hb_region(item)->hr_bdev)
- ret = sprintf(page, "%pg\n", to_o2hb_region(item)->hr_bdev);
+ if (to_o2hb_region(item)->hr_bdev_handle)
+ ret = sprintf(page, "%pg\n", reg_bdev(to_o2hb_region(item)));
return ret;
}
@@ -1745,7 +1752,10 @@ out:
return ret;
}
-/* this is acting as commit; we set up all of hr_bdev and hr_task or nothing */
+/*
+ * this is acting as commit; we set up all of hr_bdev_handle and hr_task or
+ * nothing
+ */
static ssize_t o2hb_region_dev_store(struct config_item *item,
const char *page,
size_t count)
@@ -1759,7 +1769,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
ssize_t ret = -EINVAL;
int live_threshold;
- if (reg->hr_bdev)
+ if (reg->hr_bdev_handle)
goto out;
/* We can't heartbeat without having had our node number
@@ -1785,16 +1795,15 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (!S_ISBLK(f.file->f_mapping->host->i_mode))
goto out2;
- reg->hr_bdev = blkdev_get_by_dev(f.file->f_mapping->host->i_rdev,
- BLK_OPEN_WRITE | BLK_OPEN_READ, NULL,
- NULL);
- if (IS_ERR(reg->hr_bdev)) {
- ret = PTR_ERR(reg->hr_bdev);
- reg->hr_bdev = NULL;
+ reg->hr_bdev_handle = bdev_open_by_dev(f.file->f_mapping->host->i_rdev,
+ BLK_OPEN_WRITE | BLK_OPEN_READ, NULL, NULL);
+ if (IS_ERR(reg->hr_bdev_handle)) {
+ ret = PTR_ERR(reg->hr_bdev_handle);
+ reg->hr_bdev_handle = NULL;
goto out2;
}
- sectsize = bdev_logical_block_size(reg->hr_bdev);
+ sectsize = bdev_logical_block_size(reg_bdev(reg));
if (sectsize != reg->hr_block_bytes) {
mlog(ML_ERROR,
"blocksize %u incorrect for device, expected %d",
@@ -1890,12 +1899,12 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (hb_task && o2hb_global_heartbeat_active())
printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%pg)\n",
- config_item_name(&reg->hr_item), reg->hr_bdev);
+ config_item_name(&reg->hr_item), reg_bdev(reg));
out3:
if (ret < 0) {
- blkdev_put(reg->hr_bdev, NULL);
- reg->hr_bdev = NULL;
+ bdev_release(reg->hr_bdev_handle);
+ reg->hr_bdev_handle = NULL;
}
out2:
fdput(f);
@@ -2085,7 +2094,7 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%pg)\n",
((atomic_read(&reg->hr_steady_iterations) == 0) ?
"stopped" : "start aborted"), config_item_name(item),
- reg->hr_bdev);
+ reg_bdev(reg));
}
/*
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 8b123d543e6e..a14c8fee6ee5 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1658,7 +1658,8 @@ int __ocfs2_add_entry(handle_t *handle,
offset, ocfs2_dir_trailer_blk_off(dir->i_sb));
if (ocfs2_dirent_would_fit(de, rec_len)) {
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
retval = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
if (retval < 0) {
mlog_errno(retval);
@@ -2962,11 +2963,11 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
ocfs2_dinode_new_extent_list(dir, di);
i_size_write(dir, sb->s_blocksize);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
di->i_size = cpu_to_le64(sb->s_blocksize);
- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(dir).tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(dir).tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(dir));
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(dir));
ocfs2_update_inode_fsync_trans(handle, dir, 1);
/*
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 81265123ce6c..9b57d012fd5c 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -337,7 +337,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb)
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inc_nlink(inode);
inode->i_fop = &simple_dir_operations;
@@ -360,7 +360,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent,
inode->i_ino = get_next_ino();
inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
ip = DLMFS_I(inode);
ip->ip_conn = DLMFS_I(parent)->ip_conn;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c3e2961ee5db..64a6ef638495 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2162,7 +2162,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 ts;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
@@ -2183,12 +2183,12 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
- lvb->lvb_iatime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
- lvb->lvb_ictime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&ctime));
- lvb->lvb_imtime_packed =
- cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
+ ts = inode_get_atime(inode);
+ lvb->lvb_iatime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
+ ts = inode_get_ctime(inode);
+ lvb->lvb_ictime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
+ ts = inode_get_mtime(inode);
+ lvb->lvb_imtime_packed = cpu_to_be64(ocfs2_pack_timespec(&ts));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
@@ -2209,7 +2209,7 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
- struct timespec64 ctime;
+ struct timespec64 ts;
mlog_meta_lvb(0, lockres);
@@ -2236,13 +2236,12 @@ static int ocfs2_refresh_inode_from_lvb(struct inode *inode)
i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
- ocfs2_unpack_timespec(&inode->i_atime,
- be64_to_cpu(lvb->lvb_iatime_packed));
- ocfs2_unpack_timespec(&inode->i_mtime,
- be64_to_cpu(lvb->lvb_imtime_packed));
- ocfs2_unpack_timespec(&ctime,
- be64_to_cpu(lvb->lvb_ictime_packed));
- inode_set_ctime_to_ts(inode, ctime);
+ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_iatime_packed));
+ inode_set_atime_to_ts(inode, ts);
+ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_imtime_packed));
+ inode_set_mtime_to_ts(inode, ts);
+ ocfs2_unpack_timespec(&ts, be64_to_cpu(lvb->lvb_ictime_packed));
+ inode_set_ctime_to_ts(inode, ts);
spin_unlock(&oi->ip_lock);
return 0;
}
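The lvb hunks above route every timestamp through a local timespec64 because the fields are no longer addressable directly; the packed on-wire format is untouched. For context, a runnable userspace sketch of a seconds/nanoseconds bit-packing in the style of ocfs2_pack_timespec()/ocfs2_unpack_timespec(); the 34-bit/30-bit split is assumed from the ocfs2 source and is not part of this diff:

	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	#define SEC_BITS   34
	#define SEC_SHIFT  (64 - SEC_BITS)            /* 30 */
	#define NSEC_MASK  ((1ULL << SEC_SHIFT) - 1)  /* low 30 bits */

	static uint64_t pack_timespec(const struct timespec *ts)
	{
		return ((uint64_t)ts->tv_sec << SEC_SHIFT) |
		       ((uint64_t)ts->tv_nsec & NSEC_MASK);
	}

	static void unpack_timespec(struct timespec *ts, uint64_t packed)
	{
		ts->tv_sec = packed >> SEC_SHIFT;
		ts->tv_nsec = packed & NSEC_MASK;
	}

	int main(void)
	{
		struct timespec now, rt;

		clock_gettime(CLOCK_REALTIME, &now);
		unpack_timespec(&rt, pack_timespec(&now));
		printf("round trip: %ld.%09ld -> %ld.%09ld\n",
		       (long)now.tv_sec, (long)now.tv_nsec,
		       (long)rt.tv_sec, (long)rt.tv_nsec);
		return 0;
	}

Nanoseconds always fit in 30 bits (1e9 < 2^30), so the round trip is lossless.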
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index c45596c25c66..94e2a1244442 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -233,16 +233,18 @@ int ocfs2_should_update_atime(struct inode *inode,
if (vfsmnt->mnt_flags & MNT_RELATIME) {
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 atime = inode_get_atime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
- if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
- (timespec64_compare(&inode->i_atime, &ctime) <= 0))
+ if ((timespec64_compare(&atime, &mtime) <= 0) ||
+ (timespec64_compare(&atime, &ctime) <= 0))
return 1;
return 0;
}
now = current_time(inode);
- if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
+ if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
return 0;
else
return 1;
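The rewritten ocfs2_should_update_atime() keeps the relatime rule intact: refresh atime only when it is not newer than mtime or ctime, and otherwise rate-limit updates by the mount's atime quantum. A self-contained sketch of that decision, with the quantum passed in rather than read from the superblock:

	#include <stdbool.h>
	#include <time.h>

	/* returns <0, 0, >0 like timespec64_compare() in the kernel */
	static int ts_cmp(const struct timespec *a, const struct timespec *b)
	{
		if (a->tv_sec != b->tv_sec)
			return a->tv_sec < b->tv_sec ? -1 : 1;
		return (int)(a->tv_nsec - b->tv_nsec);
	}

	static bool should_update_atime(const struct timespec *atime,
					const struct timespec *mtime,
					const struct timespec *ctime,
					const struct timespec *now,
					time_t quantum, bool relatime)
	{
		if (relatime)
			return ts_cmp(atime, mtime) <= 0 ||
			       ts_cmp(atime, ctime) <= 0;
		/* otherwise refresh at most once per quantum, as above */
		return now->tv_sec - atime->tv_sec > quantum;
	}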
@@ -275,9 +277,9 @@ int ocfs2_update_inode_atime(struct inode *inode,
* have i_rwsem to guard against concurrent changes to other
* inode fields.
*/
- inode->i_atime = current_time(inode);
- di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
+ inode_set_atime_to_ts(inode, current_time(inode));
+ di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, bh);
@@ -296,7 +298,7 @@ int ocfs2_set_inode_size(handle_t *handle,
i_size_write(inode, new_i_size);
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
if (status < 0) {
@@ -417,12 +419,12 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
}
i_size_write(inode, new_i_size);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
di = (struct ocfs2_dinode *) fe_bh->b_data;
di->i_size = cpu_to_le64(new_i_size);
- di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
@@ -821,9 +823,9 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
i_size_write(inode, abs_to);
inode->i_blocks = ocfs2_inode_sector_count(inode);
di->i_size = cpu_to_le64((u64)i_size_read(inode));
- inode->i_mtime = inode_set_ctime_current(inode);
- di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
di->i_mtime_nsec = di->i_ctime_nsec;
if (handle) {
ocfs2_journal_dirty(handle, di_bh);
@@ -2040,7 +2042,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
goto out_inode_unlock;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
if (ret < 0)
mlog_errno(ret);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index e8771600b930..999111bfc271 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -302,10 +302,10 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_mapping->a_ops = &ocfs2_aops;
}
- inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
- inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
- inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
- inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
+ inode_set_atime(inode, le64_to_cpu(fe->i_atime),
+ le32_to_cpu(fe->i_atime_nsec));
+ inode_set_mtime(inode, le64_to_cpu(fe->i_mtime),
+ le32_to_cpu(fe->i_mtime_nsec));
inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
le32_to_cpu(fe->i_ctime_nsec));
@@ -1312,12 +1312,12 @@ int ocfs2_mark_inode_dirty(handle_t *handle,
fe->i_uid = cpu_to_le32(i_uid_read(inode));
fe->i_gid = cpu_to_le32(i_gid_read(inode));
fe->i_mode = cpu_to_le16(inode->i_mode);
- fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
- fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
- fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ fe->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
+ fe->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ fe->i_mtime = cpu_to_le64(inode_get_mtime_sec(inode));
+ fe->i_mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
ocfs2_journal_dirty(handle, bh);
ocfs2_update_inode_fsync_trans(handle, inode, 1);
@@ -1348,10 +1348,10 @@ void ocfs2_refresh_inode(struct inode *inode,
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
- inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
- inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
- inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
- inode->i_mtime.tv_nsec = le32_to_cpu(fe->i_mtime_nsec);
+ inode_set_atime(inode, le64_to_cpu(fe->i_atime),
+ le32_to_cpu(fe->i_atime_nsec));
+ inode_set_mtime(inode, le64_to_cpu(fe->i_mtime),
+ le32_to_cpu(fe->i_mtime_nsec));
inode_set_ctime(inode, le64_to_cpu(fe->i_ctime),
le32_to_cpu(fe->i_ctime_nsec));
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 05d67968a3a9..1f9ed117e78b 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -951,8 +951,8 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
di = (struct ocfs2_dinode *)di_bh->b_data;
inode_set_ctime_current(inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, di_bh);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 5cd6d7771cea..681e9501cdd3 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -795,8 +795,8 @@ static int ocfs2_link(struct dentry *old_dentry,
inc_nlink(inode);
inode_set_ctime_current(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
- fe->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
@@ -995,7 +995,7 @@ static int ocfs2_unlink(struct inode *dir,
ocfs2_set_links_count(fe, inode->i_nlink);
ocfs2_journal_dirty(handle, fe_bh);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (S_ISDIR(inode->i_mode))
drop_nlink(dir);
@@ -1550,8 +1550,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (status >= 0) {
old_di = (struct ocfs2_dinode *) old_inode_bh->b_data;
- old_di->i_ctime = cpu_to_le64(inode_get_ctime(old_inode).tv_sec);
- old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(old_inode).tv_nsec);
+ old_di->i_ctime = cpu_to_le64(inode_get_ctime_sec(old_inode));
+ old_di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(old_inode));
ocfs2_journal_dirty(handle, old_inode_bh);
} else
mlog_errno(status);
@@ -1592,7 +1592,7 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
drop_nlink(new_inode);
inode_set_ctime_current(new_inode);
}
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
if (update_dot_dot) {
status = ocfs2_update_entry(old_inode, handle,
@@ -1614,8 +1614,8 @@ static int ocfs2_rename(struct mnt_idmap *idmap,
if (old_dir != new_dir) {
/* Keep the same times on both directories.*/
- new_dir->i_mtime = inode_set_ctime_to_ts(new_dir,
- inode_get_ctime(old_dir));
+ inode_set_mtime_to_ts(new_dir,
+ inode_set_ctime_to_ts(new_dir, inode_get_ctime(old_dir)));
/*
* This will also pick up the i_nlink change from the
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 25c8ec3c8c3a..3f80a56d0d60 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3751,8 +3751,8 @@ static int ocfs2_change_ctime(struct inode *inode,
}
inode_set_ctime_current(inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_journal_dirty(handle, di_bh);
@@ -4075,10 +4075,10 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
*/
inode_set_ctime_current(t_inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(t_inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(t_inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(t_inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(t_inode));
- t_inode->i_mtime = s_inode->i_mtime;
+ inode_set_mtime_to_ts(t_inode, inode_get_mtime(s_inode));
di->i_mtime = s_di->i_mtime;
di->i_mtime_nsec = s_di->i_mtime_nsec;
}
@@ -4456,7 +4456,7 @@ int ocfs2_reflink_update_dest(struct inode *dest,
if (newlen > i_size_read(dest))
i_size_write(dest, newlen);
spin_unlock(&OCFS2_I(dest)->ip_lock);
- dest->i_mtime = inode_set_ctime_current(dest);
+ inode_set_mtime_to_ts(dest, inode_set_ctime_current(dest));
ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
if (ret) {
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 6510ad783c91..3b81213ed7b8 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -87,14 +87,14 @@ static struct ocfs2_xattr_def_value_root def_xv = {
.xv.xr_list.l_count = cpu_to_le16(1),
};
-const struct xattr_handler *ocfs2_xattr_handlers[] = {
+const struct xattr_handler * const ocfs2_xattr_handlers[] = {
&ocfs2_xattr_user_handler,
&ocfs2_xattr_trusted_handler,
&ocfs2_xattr_security_handler,
NULL
};
-static const struct xattr_handler *ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
+static const struct xattr_handler * const ocfs2_xattr_handler_map[OCFS2_XATTR_MAX] = {
[OCFS2_XATTR_INDEX_USER] = &ocfs2_xattr_user_handler,
[OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access,
[OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
@@ -3422,8 +3422,8 @@ static int __ocfs2_xattr_set_handle(struct inode *inode,
}
inode_set_ctime_current(inode);
- di->i_ctime = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- di->i_ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
+ di->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
+ di->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
ocfs2_journal_dirty(ctxt->handle, xis->inode_bh);
}
out:
diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
index 00308b57f64f..65e9aa743919 100644
--- a/fs/ocfs2/xattr.h
+++ b/fs/ocfs2/xattr.h
@@ -30,7 +30,7 @@ struct ocfs2_security_xattr_info {
extern const struct xattr_handler ocfs2_xattr_user_handler;
extern const struct xattr_handler ocfs2_xattr_trusted_handler;
extern const struct xattr_handler ocfs2_xattr_security_handler;
-extern const struct xattr_handler *ocfs2_xattr_handlers[];
+extern const struct xattr_handler * const ocfs2_xattr_handlers[];
ssize_t ocfs2_listxattr(struct dentry *, char *, size_t);
int ocfs2_xattr_get_nolock(struct inode *, struct buffer_head *, int,
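The repeated `const struct xattr_handler *` to `const struct xattr_handler * const` conversions in this series add a second const so that the pointer slots themselves are immutable, letting the handler tables live in read-only data; callers are unchanged. A minimal illustration of the difference:

	struct handler { const char *prefix; };

	static const struct handler user_handler = { "user." };

	/* pointers to const handlers, and the slots themselves are const */
	static const struct handler *const handlers[] = {
		&user_handler,
		NULL,
	};

	/* handlers[0] = NULL;  <- would now fail to compile */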
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index 2f8c1882f45c..d6cd81163030 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -51,7 +51,7 @@ struct inode *omfs_new_inode(struct inode *dir, umode_t mode)
inode_init_owner(&nop_mnt_idmap, inode, NULL, mode);
inode->i_mapping->a_ops = &omfs_aops;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_op = &omfs_dir_inops;
@@ -134,8 +134,8 @@ static int __omfs_write_inode(struct inode *inode, int wait)
oi->i_head.h_magic = OMFS_IMAGIC;
oi->i_size = cpu_to_be64(inode->i_size);
- ctime = inode_get_ctime(inode).tv_sec * 1000LL +
- ((inode_get_ctime(inode).tv_nsec + 999)/1000);
+ ctime = inode_get_ctime_sec(inode) * 1000LL +
+ ((inode_get_ctime_nsec(inode) + 999)/1000);
oi->i_ctime = cpu_to_be64(ctime);
omfs_update_checksums(oi);
@@ -230,11 +230,9 @@ struct inode *omfs_iget(struct super_block *sb, ino_t ino)
ctime = be64_to_cpu(oi->i_ctime);
nsecs = do_div(ctime, 1000) * 1000L;
- inode->i_atime.tv_sec = ctime;
- inode->i_mtime.tv_sec = ctime;
+ inode_set_atime(inode, ctime, nsecs);
+ inode_set_mtime(inode, ctime, nsecs);
inode_set_ctime(inode, ctime, nsecs);
- inode->i_atime.tv_nsec = nsecs;
- inode->i_mtime.tv_nsec = nsecs;
inode->i_mapping->a_ops = &omfs_aops;
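The omfs decode above splits one 64-bit on-disk counter into seconds and a sub-second remainder; the kernel's do_div() divides in place, leaving the quotient in its argument and returning the remainder. A userspace sketch of that split, mirroring the `do_div(ctime, 1000) * 1000L` shape; the sample value is hypothetical and the exact sub-second unit is left to the driver:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t raw = 1700000000123ULL;       /* hypothetical value */
		long frac = (long)(raw % 1000) * 1000L; /* remainder, scaled */
		uint64_t secs = raw / 1000;            /* do_div() quotient */

		printf("%llu + frac %ld\n", (unsigned long long)secs, frac);
		return 0;
	}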
diff --git a/fs/open.c b/fs/open.c
index 98f6601fbac6..02dc608d40d8 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -870,6 +870,30 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
return ksys_fchown(fd, user, group);
}
+static inline int file_get_write_access(struct file *f)
+{
+ int error;
+
+ error = get_write_access(f->f_inode);
+ if (unlikely(error))
+ return error;
+ error = mnt_get_write_access(f->f_path.mnt);
+ if (unlikely(error))
+ goto cleanup_inode;
+ if (unlikely(f->f_mode & FMODE_BACKING)) {
+ error = mnt_get_write_access(backing_file_user_path(f)->mnt);
+ if (unlikely(error))
+ goto cleanup_mnt;
+ }
+ return 0;
+
+cleanup_mnt:
+ mnt_put_write_access(f->f_path.mnt);
+cleanup_inode:
+ put_write_access(f->f_inode);
+ return error;
+}
+
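file_get_write_access() layers up to three refcounts and unwinds them in reverse on failure, the classic kernel goto-cleanup idiom. A compilable userspace sketch of the same shape; acquire() and release() are hypothetical stand-ins for get_write_access(), mnt_get_write_access() and friends:

	/* hypothetical resource pair standing in for the kernel helpers */
	static int acquire(int which) { (void)which; return 0; }
	static void release(int which) { (void)which; }

	static int get_all_write_access(int need_backing)
	{
		int error;

		error = acquire(0);		/* inode write access */
		if (error)
			return error;
		error = acquire(1);		/* mount write access */
		if (error)
			goto cleanup_inode;
		if (need_backing) {
			error = acquire(2);	/* backing mount (FMODE_BACKING) */
			if (error)
				goto cleanup_mnt;
		}
		return 0;

	cleanup_mnt:
		release(1);
	cleanup_inode:
		release(0);
		return error;
	}

Each label releases exactly the resources acquired before the failing step, so success and every failure path leave a consistent state.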
static int do_dentry_open(struct file *f,
struct inode *inode,
int (*open)(struct inode *, struct file *))
@@ -892,14 +916,9 @@ static int do_dentry_open(struct file *f,
if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
i_readcount_inc(inode);
} else if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
- error = get_write_access(inode);
+ error = file_get_write_access(f);
if (unlikely(error))
goto cleanup_file;
- error = __mnt_want_write(f->f_path.mnt);
- if (unlikely(error)) {
- put_write_access(inode);
- goto cleanup_file;
- }
f->f_mode |= FMODE_WRITER;
}
@@ -1163,20 +1182,19 @@ EXPORT_SYMBOL_GPL(kernel_file_open);
/**
* backing_file_open - open a backing file for kernel internal use
- * @path: path of the file to open
+ * @user_path: path that the user requested to open
* @flags: open flags
* @real_path: path of the backing file
* @cred: credentials for open
*
* Open a backing file for a stackable filesystem (e.g., overlayfs).
- * @path may be on the stackable filesystem and backing inode on the
- * underlying filesystem. In this case, we want to be able to return
- * the @real_path of the backing inode. This is done by embedding the
- * returned file into a container structure that also stores the path of
- * the backing inode on the underlying filesystem, which can be
- * retrieved using backing_file_real_path().
+ * @user_path may be on the stackable filesystem and @real_path on the
+ * underlying filesystem. In this case, we want to be able to return the
+ * @user_path of the stackable filesystem. This is done by embedding the
+ * returned file into a container structure that also stores the stacked
+ * file's path, which can be retrieved using backing_file_user_path().
*/
-struct file *backing_file_open(const struct path *path, int flags,
+struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred)
{
@@ -1187,9 +1205,9 @@ struct file *backing_file_open(const struct path *path, int flags,
if (IS_ERR(f))
return f;
- f->f_path = *path;
- path_get(real_path);
- *backing_file_real_path(f) = *real_path;
+ path_get(user_path);
+ *backing_file_user_path(f) = *user_path;
+ f->f_path = *real_path;
error = do_dentry_open(f, d_inode(real_path->dentry), NULL);
if (error) {
fput(f);
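After this change f->f_path carries the real (lower) path, so everything under do_dentry_open() operates on the backing file, while the user-visible path moves into the containing structure and is fetched via backing_file_user_path(). A sketch of that container/accessor shape; the struct layout here is an assumption for illustration, not the kernel's definition:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct path { void *mnt, *dentry; };
	struct file { struct path f_path; };

	struct backing_file {
		struct file file;	/* f_path: the real, lower path */
		struct path user_path;	/* what the user asked to open */
	};

	static struct path *backing_file_user_path(struct file *f)
	{
		return &container_of(f, struct backing_file, file)->user_path;
	}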
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index b2457cb97fa0..c4b65a6d41cc 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -237,7 +237,7 @@ found:
if (IS_ERR(inode))
return ERR_CAST(inode);
if (inode->i_state & I_NEW) {
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
ent_oi = OP_I(inode);
ent_oi->type = ent_type;
ent_oi->u = ent_data;
@@ -387,7 +387,7 @@ static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
goto out_no_root;
}
- root_inode->i_mtime = root_inode->i_atime = inode_set_ctime_current(root_inode);
+ simple_inode_init_ts(root_inode);
root_inode->i_op = &openprom_inode_operations;
root_inode->i_fop = &openprom_operations;
root_inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index b711654ca18a..926d9c0a428a 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -103,7 +103,7 @@ enum orangefs_vfs_op_states {
#define ORANGEFS_CACHE_CREATE_FLAGS 0
#endif
-extern const struct xattr_handler *orangefs_xattr_handlers[];
+extern const struct xattr_handler * const orangefs_xattr_handlers[];
extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type, bool rcu);
extern int orangefs_set_acl(struct mnt_idmap *idmap,
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 0a9fcfdf552f..0fdceb00ca07 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -155,14 +155,14 @@ static inline void copy_attributes_from_inode(struct inode *inode,
if (orangefs_inode->attr_valid & ATTR_ATIME) {
attrs->mask |= ORANGEFS_ATTR_SYS_ATIME;
if (orangefs_inode->attr_valid & ATTR_ATIME_SET) {
- attrs->atime = (time64_t)inode->i_atime.tv_sec;
+ attrs->atime = (time64_t) inode_get_atime_sec(inode);
attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET;
}
}
if (orangefs_inode->attr_valid & ATTR_MTIME) {
attrs->mask |= ORANGEFS_ATTR_SYS_MTIME;
if (orangefs_inode->attr_valid & ATTR_MTIME_SET) {
- attrs->mtime = (time64_t)inode->i_mtime.tv_sec;
+ attrs->mtime = (time64_t) inode_get_mtime_sec(inode);
attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET;
}
}
@@ -357,15 +357,15 @@ again2:
downcall.resp.getattr.attributes.owner);
inode->i_gid = make_kgid(&init_user_ns, new_op->
downcall.resp.getattr.attributes.group);
- inode->i_atime.tv_sec = (time64_t)new_op->
- downcall.resp.getattr.attributes.atime;
- inode->i_mtime.tv_sec = (time64_t)new_op->
- downcall.resp.getattr.attributes.mtime;
+ inode_set_atime(inode,
+ (time64_t)new_op->downcall.resp.getattr.attributes.atime,
+ 0);
+ inode_set_mtime(inode,
+ (time64_t)new_op->downcall.resp.getattr.attributes.mtime,
+ 0);
inode_set_ctime(inode,
(time64_t)new_op->downcall.resp.getattr.attributes.ctime,
0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
/* special case: mark the root inode as sticky */
inode->i_mode = type | (is_root_handle(inode) ? S_ISVTX : 0) |
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 68b62689a63e..74ef75586f38 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -554,7 +554,7 @@ static const struct xattr_handler orangefs_xattr_default_handler = {
.set = orangefs_xattr_set_default,
};
-const struct xattr_handler *orangefs_xattr_handlers[] = {
+const struct xattr_handler * const orangefs_xattr_handlers[] = {
&orangefs_xattr_default_handler,
NULL
};
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 8be4dc050d1e..ec3671ca140c 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -239,6 +239,7 @@ static void ovl_file_accessed(struct file *file)
{
struct inode *inode, *upperinode;
struct timespec64 ctime, uctime;
+ struct timespec64 mtime, umtime;
if (file->f_flags & O_NOATIME)
return;
@@ -251,9 +252,11 @@ static void ovl_file_accessed(struct file *file)
ctime = inode_get_ctime(inode);
uctime = inode_get_ctime(upperinode);
- if ((!timespec64_equal(&inode->i_mtime, &upperinode->i_mtime) ||
- !timespec64_equal(&ctime, &uctime))) {
- inode->i_mtime = upperinode->i_mtime;
+ mtime = inode_get_mtime(inode);
+ umtime = inode_get_mtime(upperinode);
+ if ((!timespec64_equal(&mtime, &umtime)) ||
+ !timespec64_equal(&ctime, &uctime)) {
+ inode_set_mtime_to_ts(inode, inode_get_mtime(upperinode));
inode_set_ctime_to_ts(inode, uctime);
}
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 83ef66644c21..b6e98a7d36ce 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -704,7 +704,8 @@ int ovl_update_time(struct inode *inode, int flags)
if (upperpath.dentry) {
touch_atime(&upperpath);
- inode->i_atime = d_inode(upperpath.dentry)->i_atime;
+ inode_set_atime_to_ts(inode,
+ inode_get_atime(d_inode(upperpath.dentry)));
}
}
return 0;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 3fa2416264a4..6cd949c59fed 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -34,14 +34,22 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
struct dentry *real = NULL, *lower;
int err;
- /* It's an overlay file */
+ /*
+ * vfs is only expected to call d_real() with NULL from d_real_inode()
+ * and with overlay inode from file_dentry() on an overlay file.
+ *
+ * TODO: remove @inode argument from d_real() API, remove code in this
+ * function that deals with non-NULL @inode and remove d_real() call
+ * from file_dentry().
+ */
if (inode && d_inode(dentry) == inode)
return dentry;
+ else if (inode)
+ goto bug;
if (!d_is_reg(dentry)) {
- if (!inode || inode == d_inode(dentry))
- return dentry;
- goto bug;
+ /* d_real_inode() is only relevant for regular files */
+ return dentry;
}
real = ovl_dentry_upper(dentry);
@@ -487,13 +495,13 @@ static const struct xattr_handler ovl_other_xattr_handler = {
.set = ovl_other_xattr_set,
};
-static const struct xattr_handler *ovl_trusted_xattr_handlers[] = {
+static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = {
&ovl_own_trusted_xattr_handler,
&ovl_other_xattr_handler,
NULL
};
-static const struct xattr_handler *ovl_user_xattr_handlers[] = {
+static const struct xattr_handler * const ovl_user_xattr_handlers[] = {
&ovl_own_user_xattr_handler,
&ovl_other_xattr_handler,
NULL
@@ -1488,8 +1496,16 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_xattr = ofs->config.userxattr ? ovl_user_xattr_handlers :
ovl_trusted_xattr_handlers;
sb->s_fs_info = ofs;
+#ifdef CONFIG_FS_POSIX_ACL
sb->s_flags |= SB_POSIXACL;
+#endif
sb->s_iflags |= SB_I_SKIP_SYNC | SB_I_IMA_UNVERIFIABLE_SIGNATURE;
+ /*
+ * Ensure that umask handling is done by the filesystems used
+	 * for the upper layer instead of overlayfs as that would
+ * lead to unexpected results.
+ */
+ sb->s_iflags |= SB_I_NOUMASK;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 89e0d60d35b6..868afd8834c3 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -1409,8 +1409,8 @@ void ovl_copyattr(struct inode *inode)
inode->i_uid = vfsuid_into_kuid(vfsuid);
inode->i_gid = vfsgid_into_kgid(vfsgid);
inode->i_mode = realinode->i_mode;
- inode->i_atime = realinode->i_atime;
- inode->i_mtime = realinode->i_mtime;
+ inode_set_atime_to_ts(inode, inode_get_atime(realinode));
+ inode_set_mtime_to_ts(inode, inode_get_mtime(realinode));
inode_set_ctime_to_ts(inode, inode_get_ctime(realinode));
i_size_write(inode, i_size_read(realinode));
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 139190165a1c..8916c455a469 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -227,6 +227,36 @@ static inline bool pipe_readable(const struct pipe_inode_info *pipe)
return !pipe_empty(head, tail) || !writers;
}
+static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf,
+ unsigned int tail)
+{
+ pipe_buf_release(pipe, buf);
+
+ /*
+ * If the pipe has a watch_queue, we need additional protection
+ * by the spinlock because notifications get posted with only
+ * this spinlock, no mutex
+ */
+ if (pipe_has_watch_queue(pipe)) {
+ spin_lock_irq(&pipe->rd_wait.lock);
+#ifdef CONFIG_WATCH_QUEUE
+ if (buf->flags & PIPE_BUF_FLAG_LOSS)
+ pipe->note_loss = true;
+#endif
+ pipe->tail = ++tail;
+ spin_unlock_irq(&pipe->rd_wait.lock);
+ return tail;
+ }
+
+ /*
+ * Without a watch_queue, we can simply increment the tail
+ * without the spinlock - the mutex is enough.
+ */
+ pipe->tail = ++tail;
+ return tail;
+}
+
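pipe_update_tail() takes the rd_wait spinlock only when a watch_queue can post notifications concurrently; without one, the pipe mutex alone orders the tail update. A pthread sketch of that "lock only when a second writer exists" pattern; the struct and names are hypothetical stand-ins for the pipe fields:

	#include <pthread.h>
	#include <stdbool.h>

	struct ring {
		pthread_mutex_t mutex;	 /* reader-held, like pipe->mutex */
		pthread_spinlock_t lock; /* like pipe->rd_wait.lock */
		bool has_notifier;	 /* like pipe_has_watch_queue() */
		unsigned int tail;
	};

	/* caller holds r->mutex, mirroring pipe_read() */
	static unsigned int ring_update_tail(struct ring *r, unsigned int tail)
	{
		if (r->has_notifier) {
			/* the notifier posts under r->lock only, so we must too */
			pthread_spin_lock(&r->lock);
			r->tail = ++tail;
			pthread_spin_unlock(&r->lock);
			return tail;
		}
		/* no notifier: the mutex alone serializes tail updates */
		r->tail = ++tail;
		return tail;
	}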
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
@@ -320,17 +350,8 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
buf->len = 0;
}
- if (!buf->len) {
- pipe_buf_release(pipe, buf);
- spin_lock_irq(&pipe->rd_wait.lock);
-#ifdef CONFIG_WATCH_QUEUE
- if (buf->flags & PIPE_BUF_FLAG_LOSS)
- pipe->note_loss = true;
-#endif
- tail++;
- pipe->tail = tail;
- spin_unlock_irq(&pipe->rd_wait.lock);
- }
+ if (!buf->len)
+ tail = pipe_update_tail(pipe, buf, tail);
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
@@ -437,12 +458,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
goto out;
}
-#ifdef CONFIG_WATCH_QUEUE
- if (pipe->watch_queue) {
+ if (pipe_has_watch_queue(pipe)) {
ret = -EXDEV;
goto out;
}
-#endif
/*
* If it wasn't empty we try to merge new data into
@@ -507,16 +526,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
* it, either the reader will consume it or it'll still
* be there for the next write.
*/
- spin_lock_irq(&pipe->rd_wait.lock);
-
- head = pipe->head;
- if (pipe_full(head, pipe->tail, pipe->max_usage)) {
- spin_unlock_irq(&pipe->rd_wait.lock);
- continue;
- }
-
pipe->head = head + 1;
- spin_unlock_irq(&pipe->rd_wait.lock);
/* Insert it into the buffer array */
buf = &pipe->bufs[head & mask];
@@ -898,7 +908,7 @@ static struct inode * get_pipe_inode(void)
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
return inode;
@@ -1324,10 +1334,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned int arg)
unsigned int nr_slots, size;
long ret = 0;
-#ifdef CONFIG_WATCH_QUEUE
- if (pipe->watch_queue)
+ if (pipe_has_watch_queue(pipe))
return -EBUSY;
-#endif
size = round_pipe_size(arg);
nr_slots = size >> PAGE_SHIFT;
@@ -1379,10 +1387,8 @@ struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
if (file->f_op != &pipefifo_fops || !pipe)
return NULL;
-#ifdef CONFIG_WATCH_QUEUE
- if (for_splice && pipe->watch_queue)
+ if (for_splice && pipe_has_watch_queue(pipe))
return NULL;
-#endif
return pipe;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ffd54617c354..83396ab14998 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1902,7 +1902,7 @@ struct inode *proc_pid_make_inode(struct super_block *sb,
ei = PROC_I(inode);
inode->i_mode = mode;
inode->i_ino = get_next_ino();
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &proc_def_inode_operations;
/*
@@ -2218,7 +2218,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
rc = -ENOENT;
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
- *path = vma->vm_file->f_path;
+ *path = *file_user_path(vma->vm_file);
path_get(path);
rc = 0;
}
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 6276b3938842..6e72e5ad42bc 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -113,10 +113,12 @@ static bool tid_fd_mode(struct task_struct *task, unsigned fd, fmode_t *mode)
struct file *file;
rcu_read_lock();
- file = task_lookup_fd_rcu(task, fd);
- if (file)
- *mode = file->f_mode;
+ file = task_lookup_fdget_rcu(task, fd);
rcu_read_unlock();
+ if (file) {
+ *mode = file->f_mode;
+ fput(file);
+ }
return !!file;
}
@@ -259,12 +261,13 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
char name[10 + 1];
unsigned int len;
- f = task_lookup_next_fd_rcu(p, &fd);
+ f = task_lookup_next_fdget_rcu(p, &fd);
ctx->pos = fd + 2LL;
if (!f)
break;
data.mode = f->f_mode;
rcu_read_unlock();
+ fput(f);
data.fd = fd;
len = snprintf(name, sizeof(name), "%u", fd);
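The procfs fd hunks switch from borrowing a file pointer inside the RCU section to taking a reference with task_lookup_fdget_rcu(), so f_mode can be read safely after rcu_read_unlock(), balanced by fput(). A simplified userspace sketch of borrow-versus-reference with an atomic refcount; the RCU machinery itself is omitted:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct object {
		atomic_int refs;
		int mode;
	};

	static struct object *lookup_get(struct object *table[], int idx)
	{
		struct object *o = table[idx]; /* under rcu_read_lock() in the kernel */

		if (o)
			atomic_fetch_add(&o->refs, 1); /* pin before the lock drops */
		return o;
	}

	static void put_object(struct object *o)	/* the fput() analogue */
	{
		if (atomic_fetch_sub(&o->refs, 1) == 1)
			free(o);
	}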
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 532dc9d240f7..592ed2516f47 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -660,7 +660,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
inode->i_private = de->data;
inode->i_ino = de->low_ino;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
PROC_I(inode)->pde = de;
if (is_empty_pde(de)) {
make_empty_dir_inode(inode);
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 4d3493579458..c6e7ebc63756 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -58,7 +58,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
if (file) {
seq_pad(m, ' ');
- seq_file_path(m, file, "");
+ seq_path(m, file_user_path(file), "");
}
seq_putc(m, '\n');
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index c88854df0b62..bc9a2db89cfa 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -465,7 +465,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
head->count++;
spin_unlock(&sysctl_lock);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = table->mode;
if (!S_ISDIR(table->mode)) {
inode->i_mode |= S_IFREG;
diff --git a/fs/proc/self.c b/fs/proc/self.c
index ecc4da8d265e..b46fbfd22681 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -46,7 +46,7 @@ int proc_setup_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = self_inum;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3dd5be96691b..1593940ca01e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -296,7 +296,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
if (anon_name)
seq_printf(m, "[anon_shmem:%s]", anon_name->name);
else
- seq_file_path(m, file, "\n");
+ seq_path(m, file_user_path(file), "\n");
goto done;
}
@@ -1967,7 +1967,7 @@ static int show_numa_map(struct seq_file *m, void *v)
if (file) {
seq_puts(m, " file=");
- seq_file_path(m, file, "\n\t= ");
+ seq_path(m, file_user_path(file), "\n\t= ");
} else if (vma_is_initial_heap(vma)) {
seq_puts(m, " heap");
} else if (vma_is_initial_stack(vma)) {
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 7cebd397cc26..bce674533000 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -157,7 +157,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
if (file) {
seq_pad(m, ' ');
- seq_file_path(m, file, "");
+ seq_path(m, file_user_path(file), "");
} else if (mm && vma_is_initial_stack(vma)) {
seq_pad(m, ' ');
seq_puts(m, "[stack]");
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index 63ac1f93289f..0e5050d6ab64 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -46,7 +46,7 @@ int proc_setup_thread_self(struct super_block *s)
struct inode *inode = new_inode(s);
if (inode) {
inode->i_ino = thread_self_inum;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mode = S_IFLNK | S_IRWXUGO;
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 585360706b33..d41c20d1b5e8 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -223,7 +223,7 @@ static struct inode *pstore_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
@@ -390,7 +390,8 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
inode->i_private = private;
if (record->time.tv_sec)
- inode->i_mtime = inode_set_ctime_to_ts(inode, record->time);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_to_ts(inode, record->time));
d_add(dentry, inode);
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index a7171f5532a1..6eb9bb369b57 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -301,10 +301,8 @@ struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid));
set_nlink(inode, le16_to_cpu(raw_inode->di_nlink));
inode->i_size = le32_to_cpu(raw_inode->di_size);
- inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime);
- inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode, le32_to_cpu(raw_inode->di_mtime), 0);
+ inode_set_atime(inode, le32_to_cpu(raw_inode->di_atime), 0);
inode_set_ctime(inode, le32_to_cpu(raw_inode->di_ctime), 0);
inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 21f90d519f1a..a286c545717f 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -558,10 +558,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino)
i_uid_write(inode, (uid_t)fs32_to_cpu(sbi, raw_inode->di_uid));
i_gid_write(inode, (gid_t)fs32_to_cpu(sbi, raw_inode->di_gid));
inode->i_size = fs64_to_cpu(sbi, raw_inode->di_size);
- inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_mtime);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->di_atime);
- inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode, fs32_to_cpu(sbi, raw_inode->di_mtime), 0);
+ inode_set_atime(inode, fs32_to_cpu(sbi, raw_inode->di_atime), 0);
inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->di_ctime), 0);
/* calc blocks based on 512 byte blocksize */
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index 18e8387cab41..4ac05a9e25bc 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -65,7 +65,7 @@ struct inode *ramfs_get_inode(struct super_block *sb,
inode->i_mapping->a_ops = &ram_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
mapping_set_unevictable(inode->i_mapping);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
@@ -105,7 +105,7 @@ ramfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
error = 0;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
return error;
}
@@ -138,7 +138,8 @@ static int ramfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
if (!error) {
d_instantiate(dentry, inode);
dget(dentry);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_current(dir));
} else
iput(inode);
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 86e55d4bb10d..c8572346556f 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1257,11 +1257,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
i_uid_write(inode, sd_v1_uid(sd));
i_gid_write(inode, sd_v1_gid(sd));
inode->i_size = sd_v1_size(sd);
- inode->i_atime.tv_sec = sd_v1_atime(sd);
- inode->i_mtime.tv_sec = sd_v1_mtime(sd);
+ inode_set_atime(inode, sd_v1_atime(sd), 0);
+ inode_set_mtime(inode, sd_v1_mtime(sd), 0);
inode_set_ctime(inode, sd_v1_ctime(sd), 0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
inode->i_blocks = sd_v1_blocks(sd);
inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
@@ -1311,11 +1309,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
i_uid_write(inode, sd_v2_uid(sd));
inode->i_size = sd_v2_size(sd);
i_gid_write(inode, sd_v2_gid(sd));
- inode->i_mtime.tv_sec = sd_v2_mtime(sd);
- inode->i_atime.tv_sec = sd_v2_atime(sd);
+ inode_set_mtime(inode, sd_v2_mtime(sd), 0);
+ inode_set_atime(inode, sd_v2_atime(sd), 0);
inode_set_ctime(inode, sd_v2_ctime(sd), 0);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
inode->i_blocks = sd_v2_blocks(sd);
rdev = sd_v2_rdev(sd);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
@@ -1370,9 +1366,9 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size)
set_sd_v2_uid(sd_v2, i_uid_read(inode));
set_sd_v2_size(sd_v2, size);
set_sd_v2_gid(sd_v2, i_gid_read(inode));
- set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
- set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
- set_sd_v2_ctime(sd_v2, inode_get_ctime(inode).tv_sec);
+ set_sd_v2_mtime(sd_v2, inode_get_mtime_sec(inode));
+ set_sd_v2_atime(sd_v2, inode_get_atime_sec(inode));
+ set_sd_v2_ctime(sd_v2, inode_get_ctime_sec(inode));
set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
@@ -1391,9 +1387,9 @@ static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
set_sd_v1_gid(sd_v1, i_gid_read(inode));
set_sd_v1_nlink(sd_v1, inode->i_nlink);
set_sd_v1_size(sd_v1, size);
- set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
- set_sd_v1_ctime(sd_v1, inode_get_ctime(inode).tv_sec);
- set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
+ set_sd_v1_atime(sd_v1, inode_get_atime_sec(inode));
+ set_sd_v1_ctime(sd_v1, inode_get_ctime_sec(inode));
+ set_sd_v1_mtime(sd_v1, inode_get_mtime_sec(inode));
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
@@ -1984,7 +1980,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
/* uid and gid must already be set by the caller for quota init */
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_size = i_size;
inode->i_blocks = 0;
inode->i_bytes = 0;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 015bfe4e4524..171c912af50f 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -90,8 +90,7 @@ static int flush_commit_list(struct super_block *s,
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
struct super_block *sb);
-static void release_journal_dev(struct super_block *super,
- struct reiserfs_journal *journal);
+static void release_journal_dev(struct reiserfs_journal *journal);
static void dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
@@ -1893,7 +1892,7 @@ static void free_journal_ram(struct super_block *sb)
* j_header_bh is on the journal dev, make sure
* not to release the journal dev until we brelse j_header_bh
*/
- release_journal_dev(sb, journal);
+ release_journal_dev(journal);
vfree(journal);
}
@@ -2387,7 +2386,7 @@ static int journal_read(struct super_block *sb)
cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
reiserfs_info(sb, "checking transaction log (%pg)\n",
- journal->j_dev_bd);
+ journal->j_bdev_handle->bdev);
start = ktime_get_seconds();
/*
@@ -2448,7 +2447,7 @@ static int journal_read(struct super_block *sb)
* device and journal device to be the same
*/
d_bh =
- reiserfs_breada(journal->j_dev_bd, cur_dblock,
+ reiserfs_breada(journal->j_bdev_handle->bdev, cur_dblock,
sb->s_blocksize,
SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
SB_ONDISK_JOURNAL_SIZE(sb));
@@ -2587,17 +2586,11 @@ static void journal_list_init(struct super_block *sb)
SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
}
-static void release_journal_dev(struct super_block *super,
- struct reiserfs_journal *journal)
+static void release_journal_dev(struct reiserfs_journal *journal)
{
- if (journal->j_dev_bd != NULL) {
- void *holder = NULL;
-
- if (journal->j_dev_bd->bd_dev != super->s_dev)
- holder = journal;
-
- blkdev_put(journal->j_dev_bd, holder);
- journal->j_dev_bd = NULL;
+ if (journal->j_bdev_handle) {
+ bdev_release(journal->j_bdev_handle);
+ journal->j_bdev_handle = NULL;
}
}
@@ -2612,7 +2605,7 @@ static int journal_init_dev(struct super_block *super,
result = 0;
- journal->j_dev_bd = NULL;
+ journal->j_bdev_handle = NULL;
jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
@@ -2623,36 +2616,37 @@ static int journal_init_dev(struct super_block *super,
if ((!jdev_name || !jdev_name[0])) {
if (jdev == super->s_dev)
holder = NULL;
- journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, holder,
- NULL);
- if (IS_ERR(journal->j_dev_bd)) {
- result = PTR_ERR(journal->j_dev_bd);
- journal->j_dev_bd = NULL;
+ journal->j_bdev_handle = bdev_open_by_dev(jdev, blkdev_mode,
+ holder, NULL);
+ if (IS_ERR(journal->j_bdev_handle)) {
+ result = PTR_ERR(journal->j_bdev_handle);
+ journal->j_bdev_handle = NULL;
reiserfs_warning(super, "sh-458",
"cannot init journal device unknown-block(%u,%u): %i",
MAJOR(jdev), MINOR(jdev), result);
return result;
} else if (jdev != super->s_dev)
- set_blocksize(journal->j_dev_bd, super->s_blocksize);
+ set_blocksize(journal->j_bdev_handle->bdev,
+ super->s_blocksize);
return 0;
}
- journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, holder,
- NULL);
- if (IS_ERR(journal->j_dev_bd)) {
- result = PTR_ERR(journal->j_dev_bd);
- journal->j_dev_bd = NULL;
+ journal->j_bdev_handle = bdev_open_by_path(jdev_name, blkdev_mode,
+ holder, NULL);
+ if (IS_ERR(journal->j_bdev_handle)) {
+ result = PTR_ERR(journal->j_bdev_handle);
+ journal->j_bdev_handle = NULL;
reiserfs_warning(super, "sh-457",
"journal_init_dev: Cannot open '%s': %i",
jdev_name, result);
return result;
}
- set_blocksize(journal->j_dev_bd, super->s_blocksize);
+ set_blocksize(journal->j_bdev_handle->bdev, super->s_blocksize);
reiserfs_info(super,
"journal_init_dev: journal device: %pg\n",
- journal->j_dev_bd);
+ journal->j_bdev_handle->bdev);
return 0;
}
@@ -2810,7 +2804,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
"journal header magic %x (device %pg) does "
"not match to magic found in super block %x",
jh->jh_journal.jp_journal_magic,
- journal->j_dev_bd,
+ journal->j_bdev_handle->bdev,
sb_jp_journal_magic(rs));
brelse(bhjh);
goto free_and_return;
@@ -2834,7 +2828,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
reiserfs_info(sb, "journal params: device %pg, size %u, "
"journal first block %u, max trans len %u, max batch %u, "
"max commit age %u, max trans age %u\n",
- journal->j_dev_bd,
+ journal->j_bdev_handle->bdev,
SB_ONDISK_JOURNAL_SIZE(sb),
SB_ONDISK_JOURNAL_1st_BLOCK(sb),
journal->j_trans_max,
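The reiserfs journal now stores a struct bdev_handle and pairs bdev_open_by_dev()/bdev_open_by_path() with bdev_release(), instead of keeping a raw block_device and recomputing the holder at blkdev_put() time. A userspace analogy of the handle pattern, where the handle remembers everything release needs; the types are illustrative only:

	#include <fcntl.h>
	#include <stdlib.h>
	#include <unistd.h>

	struct dev_handle {	/* stands in for struct bdev_handle */
		int fd;		/* stands in for handle->bdev */
		void *holder;	/* remembered, so release needs no caller state */
	};

	static struct dev_handle *dev_open_by_path(const char *path, int flags,
						   void *holder)
	{
		struct dev_handle *h = malloc(sizeof(*h));

		if (!h)
			return NULL;
		h->fd = open(path, flags);
		if (h->fd < 0) {
			free(h);
			return NULL;
		}
		h->holder = holder;
		return h;
	}

	static void dev_release(struct dev_handle *h)
	{
		/* no "which holder did we pass?" bookkeeping at the call site */
		close(h->fd);
		free(h);
	}

This is why release_journal_dev() loses its super_block argument above: the handle, not the caller, knows how the device was opened.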
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 9c5704be2435..994d6e6995ab 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -572,7 +572,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
}
dir->i_size += paste_size;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
if (!S_ISDIR(inode->i_mode) && visible)
/* reiserfs_mkdir or reiserfs_rename will do that by itself */
reiserfs_update_sd(th, dir);
@@ -966,8 +966,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_nlink);
clear_nlink(inode);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
reiserfs_update_sd(&th, inode);
DEC_DIR_INODE_NLINK(dir)
@@ -1075,7 +1075,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
reiserfs_update_sd(&th, inode);
dir->i_size -= (de.de_entrylen + DEH_SIZE);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
reiserfs_update_sd(&th, dir);
if (!savelink)
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 3dba8acf4e83..83cb9402e0f9 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -354,7 +354,7 @@ static int show_journal(struct seq_file *m, void *unused)
"prepare: \t%12lu\n"
"prepare_retry: \t%12lu\n",
DJP(jp_journal_1st_block),
- SB_JOURNAL(sb)->j_dev_bd,
+ SB_JOURNAL(sb)->j_bdev_handle->bdev,
DJP(jp_journal_dev),
DJP(jp_journal_size),
DJP(jp_journal_trans_max),
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 7d12b8c5b2fa..725667880e62 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -299,7 +299,7 @@ struct reiserfs_journal {
/* oldest journal block. start here for traverse */
struct reiserfs_journal_cnode *j_first;
- struct block_device *j_dev_bd;
+ struct bdev_handle *j_bdev_handle;
/* first block on s_dev of reserved area journal */
int j_1st_reserved_block;
@@ -1165,7 +1165,7 @@ static inline int bmap_would_wrap(unsigned bmap_nr)
return bmap_nr > ((1LL << 16) - 1);
}
-extern const struct xattr_handler *reiserfs_xattr_handlers[];
+extern const struct xattr_handler * const reiserfs_xattr_handlers[];
/*
* this says about version of key of all items (but stat data) the
@@ -2809,9 +2809,12 @@ struct reiserfs_journal_header {
#define journal_hash(t,sb,block) ((t)[_jhashfn((sb),(block)) & JBH_HASH_MASK])
/* We need these to make journal.c code more readable */
-#define journal_find_get_block(s, block) __find_get_block(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
-#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
-#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_dev_bd, block, s->s_blocksize)
+#define journal_find_get_block(s, block) __find_get_block(\
+ SB_JOURNAL(s)->j_bdev_handle->bdev, block, s->s_blocksize)
+#define journal_getblk(s, block) __getblk(SB_JOURNAL(s)->j_bdev_handle->bdev,\
+ block, s->s_blocksize)
+#define journal_bread(s, block) __bread(SB_JOURNAL(s)->j_bdev_handle->bdev,\
+ block, s->s_blocksize)
enum reiserfs_bh_state_bits {
BH_JDirty = BH_PrivateStart, /* buffer is in current transaction */
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 3676e02a0232..2138ee7d271d 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -2003,7 +2003,8 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
pathrelse(&s_search_path);
if (update_timestamps) {
- inode->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(inode,
+ current_time(inode));
inode_set_ctime_current(inode);
}
reiserfs_update_sd(th, inode);
@@ -2028,7 +2029,7 @@ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
update_and_out:
if (update_timestamps) {
/* this is truncate, not file closing */
- inode->i_mtime = current_time(inode);
+ inode_set_mtime_to_ts(inode, current_time(inode));
inode_set_ctime_current(inode);
}
reiserfs_update_sd(th, inode);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 7eaf36b3de12..67b5510beded 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2587,7 +2587,7 @@ out:
return err;
if (inode->i_size < off + len - towrite)
i_size_write(inode, off + len - towrite);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return len - towrite;
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 6000964c2b80..998035a6388e 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -780,7 +780,7 @@ static inline bool reiserfs_posix_acl_list(const char *name,
}
/* This is the implementation for the xattr plugin infrastructure */
-static inline bool reiserfs_xattr_list(const struct xattr_handler **handlers,
+static inline bool reiserfs_xattr_list(const struct xattr_handler * const *handlers,
const char *name, struct dentry *dentry)
{
if (handlers) {
@@ -911,7 +911,7 @@ static int create_privroot(struct dentry *dentry) { return 0; }
#endif
/* Actual operations that are exported to VFS-land */
-const struct xattr_handler *reiserfs_xattr_handlers[] = {
+const struct xattr_handler * const reiserfs_xattr_handlers[] = {
#ifdef CONFIG_REISERFS_FS_XATTR
&reiserfs_xattr_user_handler,
&reiserfs_xattr_trusted_handler,
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 5c35f6c76037..545ad44f96b8 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -322,7 +322,8 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos)
set_nlink(i, 1); /* Hard to decide.. */
i->i_size = be32_to_cpu(ri.size);
- i->i_mtime = i->i_atime = inode_set_ctime(i, 0, 0);
+ inode_set_mtime_to_ts(i,
+ inode_set_atime_to_ts(i, inode_set_ctime(i, 0, 0)));
/* set up mode and ops */
mode = romfs_modemap[nextfh & ROMFH_TYPE];
@@ -593,7 +594,7 @@ static void romfs_kill_sb(struct super_block *sb)
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- blkdev_put(sb->s_bdev, sb);
+ bdev_release(sb->s_bdev_handle);
}
#endif
}
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 41daebd220ff..8ca3d7606bb4 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -127,7 +127,7 @@ extern int cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
struct dentry *direntry, const char *symname);
#ifdef CONFIG_CIFS_XATTR
-extern const struct xattr_handler *cifs_xattr_handlers[];
+extern const struct xattr_handler * const cifs_xattr_handlers[];
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
#else
# define cifs_xattr_handlers NULL
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 2108b3b40ce9..cf17e3dd703e 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -1085,7 +1085,8 @@ int cifs_close(struct inode *inode, struct file *file)
!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
@@ -2596,7 +2597,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
write_data, to - from, &offset);
cifsFileInfo_put(open_file);
/* Does mm or vfs already set times? */
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
if ((bytes_written > 0) && (offset))
rc = 0;
else if (bytes_written < 0)
@@ -4647,11 +4648,13 @@ static void cifs_readahead(struct readahead_control *ractl)
static int cifs_readpage_worker(struct file *file, struct page *page,
loff_t *poffset)
{
+ struct inode *inode = file_inode(file);
+ struct timespec64 atime, mtime;
char *read_data;
int rc;
/* Is the page cached? */
- rc = cifs_readpage_from_fscache(file_inode(file), page);
+ rc = cifs_readpage_from_fscache(inode, page);
if (rc == 0)
goto read_complete;
@@ -4666,11 +4669,10 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
cifs_dbg(FYI, "Bytes read %d\n", rc);
/* we do not want atime to be less than mtime, it broke some apps */
- file_inode(file)->i_atime = current_time(file_inode(file));
- if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
- file_inode(file)->i_atime = file_inode(file)->i_mtime;
- else
- file_inode(file)->i_atime = current_time(file_inode(file));
+ atime = inode_set_atime_to_ts(inode, current_time(inode));
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&atime, &mtime))
+ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
if (PAGE_SIZE > rc)
memset(read_data + rc, 0, PAGE_SIZE - rc);
diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
index 84f3b09367d2..a3d73720914f 100644
--- a/fs/smb/client/fscache.h
+++ b/fs/smb/client/fscache.h
@@ -49,12 +49,12 @@ static inline
void cifs_fscache_fill_coherency(struct inode *inode,
struct cifs_fscache_inode_coherency_data *cd)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
memset(cd, 0, sizeof(*cd));
- cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
- cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
+ cd->last_write_time_sec = cpu_to_le64(mtime.tv_sec);
+ cd->last_write_time_nsec = cpu_to_le32(mtime.tv_nsec);
cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec);
cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec);
}
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index d7c302442c1e..3abfe77bfa46 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -82,6 +82,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+ struct timespec64 mtime;
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
__func__, cifs_i->uniqueid);
@@ -101,7 +102,8 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
/* revalidate if mtime or size have changed */
fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
- if (timespec64_equal(&inode->i_mtime, &fattr->cf_mtime) &&
+ mtime = inode_get_mtime(inode);
+ if (timespec64_equal(&mtime, &fattr->cf_mtime) &&
cifs_i->server_eof == fattr->cf_eof) {
cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
__func__, cifs_i->uniqueid);
@@ -164,10 +166,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
fattr->cf_ctime = timestamp_truncate(fattr->cf_ctime, inode);
/* we do not want atime to be less than mtime, it broke some apps */
if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
- inode->i_atime = fattr->cf_mtime;
+ inode_set_atime_to_ts(inode, fattr->cf_mtime);
else
- inode->i_atime = fattr->cf_atime;
- inode->i_mtime = fattr->cf_mtime;
+ inode_set_atime_to_ts(inode, fattr->cf_atime);
+ inode_set_mtime_to_ts(inode, fattr->cf_mtime);
inode_set_ctime_to_ts(inode, fattr->cf_ctime);
inode->i_rdev = fattr->cf_rdev;
cifs_nlink_fattr_to_inode(inode, fattr);
@@ -1816,7 +1818,7 @@ out_reval:
when needed */
inode_set_ctime_current(inode);
}
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
cifs_inode = CIFS_I(dir);
CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
unlink_out:
@@ -2131,7 +2133,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
cifsInode->time = 0;
inode_set_ctime_current(d_inode(direntry));
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
rmdir_exit:
free_dentry_path(page);
@@ -2337,9 +2339,6 @@ unlink_target:
/* force revalidate to go get info when needed */
CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;
- source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir,
- inode_set_ctime_current(target_dir));
-
cifs_rename_exit:
kfree(info_buf_source);
free_dentry_path(page2);
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 9aeecee6b91b..f4849a8ad40b 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -1403,12 +1403,14 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
/* Creation time should not need to be updated on close */
if (file_inf.LastWriteTime)
- inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
+ inode_set_mtime_to_ts(inode,
+ cifs_NTtimeToUnix(file_inf.LastWriteTime));
if (file_inf.ChangeTime)
inode_set_ctime_to_ts(inode,
cifs_NTtimeToUnix(file_inf.ChangeTime));
if (file_inf.LastAccessTime)
- inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
+ inode_set_atime_to_ts(inode,
+ cifs_NTtimeToUnix(file_inf.LastAccessTime));
/*
* i_blocks is not related to (i_size / i_blksize),
diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
index 4ad5531686d8..ac199160bce6 100644
--- a/fs/smb/client/xattr.c
+++ b/fs/smb/client/xattr.c
@@ -478,7 +478,7 @@ static const struct xattr_handler smb3_ntsd_full_xattr_handler = {
.set = cifs_xattr_set,
};
-const struct xattr_handler *cifs_xattr_handlers[] = {
+const struct xattr_handler * const cifs_xattr_handlers[] = {
&cifs_user_xattr_handler,
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 93262ca3f58a..658209839729 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -4834,9 +4834,9 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
file_info->CreationTime = cpu_to_le64(fp->create_time);
- time = ksmbd_UnixTimeToNT(inode->i_atime);
+ time = ksmbd_UnixTimeToNT(inode_get_atime(inode));
file_info->LastAccessTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode->i_mtime);
+ time = ksmbd_UnixTimeToNT(inode_get_mtime(inode));
file_info->LastWriteTime = cpu_to_le64(time);
time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
file_info->ChangeTime = cpu_to_le64(time);
@@ -5443,9 +5443,9 @@ int smb2_close(struct ksmbd_work *work)
rsp->EndOfFile = cpu_to_le64(inode->i_size);
rsp->Attributes = fp->f_ci->m_fattr;
rsp->CreationTime = cpu_to_le64(fp->create_time);
- time = ksmbd_UnixTimeToNT(inode->i_atime);
+ time = ksmbd_UnixTimeToNT(inode_get_atime(inode));
rsp->LastAccessTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode->i_mtime);
+ time = ksmbd_UnixTimeToNT(inode_get_mtime(inode));
rsp->LastWriteTime = cpu_to_le64(time);
time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
rsp->ChangeTime = cpu_to_le64(time);
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index c6e626b00546..aa3411354e66 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -59,9 +59,9 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
- inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime);
- inode->i_atime.tv_sec = inode->i_mtime.tv_sec;
- inode_set_ctime(inode, inode->i_mtime.tv_sec, 0);
+ inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
+ inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
+ inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
inode->i_mode = le16_to_cpu(sqsh_ino->mode);
inode->i_size = 0;
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h
index a6164fdf9435..5a756e6790b5 100644
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -111,4 +111,4 @@ extern const struct address_space_operations squashfs_symlink_aops;
extern const struct inode_operations squashfs_symlink_inode_ops;
/* xattr.c */
-extern const struct xattr_handler *squashfs_xattr_handlers[];
+extern const struct xattr_handler * const squashfs_xattr_handlers[];
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index e1e3f3dd5a06..ce6608cabd49 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -262,7 +262,7 @@ static const struct xattr_handler *squashfs_xattr_handler(int type)
}
}
-const struct xattr_handler *squashfs_xattr_handlers[] = {
+const struct xattr_handler * const squashfs_xattr_handlers[] = {
&squashfs_xattr_user_handler,
&squashfs_xattr_trusted_handler,
&squashfs_xattr_security_handler,
diff --git a/fs/stack.c b/fs/stack.c
index b5e01bdb5f5f..f18920119944 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -66,8 +66,8 @@ void fsstack_copy_attr_all(struct inode *dest, const struct inode *src)
dest->i_uid = src->i_uid;
dest->i_gid = src->i_gid;
dest->i_rdev = src->i_rdev;
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
inode_set_ctime_to_ts(dest, inode_get_ctime(src));
dest->i_blkbits = src->i_blkbits;
dest->i_flags = src->i_flags;
diff --git a/fs/stat.c b/fs/stat.c
index d43a5cc1bfa4..24bb0209e459 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -57,8 +57,8 @@ void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
stat->gid = vfsgid_into_kgid(vfsgid);
stat->rdev = inode->i_rdev;
stat->size = i_size_read(inode);
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
+ stat->atime = inode_get_atime(inode);
+ stat->mtime = inode_get_mtime(inode);
stat->ctime = inode_get_ctime(inode);
stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
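The fs/stack.c and fs/stat.c hunks above, like most of this section, are one mechanical conversion: stop dereferencing inode->i_atime and inode->i_mtime directly and go through accessor helpers, completing the pattern the earlier i_ctime work established. A minimal sketch of the resulting idiom (example_touch is hypothetical; the accessors are the ones these hunks call):

/* the accessors come from <linux/fs.h>; example_touch is hypothetical */
static void example_touch(struct inode *inode)
{
	struct timespec64 mtime;

	/* the setters return the timespec64 they stored, so updates chain */
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));

	/* reads go through a local copy instead of &inode->i_mtime */
	mtime = inode_get_mtime(inode);
	pr_debug("mtime %lld.%09ld\n", (long long)mtime.tv_sec, mtime.tv_nsec);
}

The chained form is why inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)) recurs throughout the directory-update hunks in this series.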
diff --git a/fs/super.c b/fs/super.c
index 2d762ce67f6e..c7b452e12e4c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1419,32 +1419,48 @@ EXPORT_SYMBOL(sget_dev);
#ifdef CONFIG_BLOCK
/*
- * Lock a super block that the callers holds a reference to.
+ * Lock the superblock that is the holder of the bdev. Returns the superblock
+ * pointer if we successfully locked the superblock and it is alive. Otherwise
+ * we return NULL and just unlock bdev->bd_holder_lock.
*
- * The caller needs to ensure that the super_block isn't being freed while
- * calling this function, e.g. by holding a lock over the call to this function
- * and the place that clears the pointer to the superblock used by this function
- * before freeing the superblock.
+ * The function must be called with bdev->bd_holder_lock held and releases it.
*/
-static bool super_lock_shared_active(struct super_block *sb)
+static struct super_block *bdev_super_lock_shared(struct block_device *bdev)
+ __releases(&bdev->bd_holder_lock)
{
- bool born = super_lock_shared(sb);
+ struct super_block *sb = bdev->bd_holder;
+ bool born;
+
+ lockdep_assert_held(&bdev->bd_holder_lock);
+ lockdep_assert_not_held(&sb->s_umount);
+ lockdep_assert_not_held(&bdev->bd_disk->open_mutex);
+
+ /* Make sure sb doesn't go away from under us */
+ spin_lock(&sb_lock);
+ sb->s_count++;
+ spin_unlock(&sb_lock);
+ mutex_unlock(&bdev->bd_holder_lock);
+ born = super_lock_shared(sb);
if (!born || !sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
super_unlock_shared(sb);
- return false;
+ put_super(sb);
+ return NULL;
}
- return true;
+ /*
+ * The superblock is active and we hold s_umount, so we can drop our
+ * temporary reference now.
+ */
+ put_super(sb);
+ return sb;
}
static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
- struct super_block *sb = bdev->bd_holder;
-
- /* bd_holder_lock ensures that the sb isn't freed */
- lockdep_assert_held(&bdev->bd_holder_lock);
+ struct super_block *sb;
- if (!super_lock_shared_active(sb))
+ sb = bdev_super_lock_shared(bdev);
+ if (!sb)
return;
if (!surprise)
@@ -1459,11 +1475,10 @@ static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
static void fs_bdev_sync(struct block_device *bdev)
{
- struct super_block *sb = bdev->bd_holder;
-
- lockdep_assert_held(&bdev->bd_holder_lock);
+ struct super_block *sb;
- if (!super_lock_shared_active(sb))
+ sb = bdev_super_lock_shared(bdev);
+ if (!sb)
return;
sync_filesystem(sb);
super_unlock_shared(sb);
@@ -1479,14 +1494,16 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
struct fs_context *fc)
{
blk_mode_t mode = sb_open_mode(sb_flags);
+ struct bdev_handle *bdev_handle;
struct block_device *bdev;
- bdev = blkdev_get_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
- if (IS_ERR(bdev)) {
+ bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
+ if (IS_ERR(bdev_handle)) {
if (fc)
errorf(fc, "%s: Can't open blockdev", fc->source);
- return PTR_ERR(bdev);
+ return PTR_ERR(bdev_handle);
}
+ bdev = bdev_handle->bdev;
/*
* This really should be in blkdev_get_by_dev, but right now can't due
@@ -1494,7 +1511,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
* writable from userspace even for a read-only block device.
*/
if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
- blkdev_put(bdev, sb);
+ bdev_release(bdev_handle);
return -EACCES;
}
@@ -1510,10 +1527,11 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
mutex_unlock(&bdev->bd_fsfreeze_mutex);
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
- blkdev_put(bdev, sb);
+ bdev_release(bdev_handle);
return -EBUSY;
}
spin_lock(&sb_lock);
+ sb->s_bdev_handle = bdev_handle;
sb->s_bdev = bdev;
sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
if (bdev_stable_writes(bdev))
@@ -1646,7 +1664,7 @@ void kill_block_super(struct super_block *sb)
generic_shutdown_super(sb);
if (bdev) {
sync_blockdev(bdev);
- blkdev_put(bdev, sb);
+ bdev_release(sb->s_bdev_handle);
}
}
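bdev_super_lock_shared() above pins the superblock before giving up bd_holder_lock: it bumps s_count under sb_lock, drops the holder lock, takes s_umount shared, re-checks that the superblock is alive, and only then drops the temporary reference. A hedged sketch of that handoff with hypothetical names (struct object, struct container, and put_object are stand-ins, not kernel API; the sketch folds the refcount under the container's lock for brevity):

struct object {
	struct rw_semaphore own_lock;
	int refcount;		/* protected by the container's lock here */
	bool alive;
};

struct container {
	struct mutex lock;	/* protects ->ptr and, in this sketch, refs */
	struct object *ptr;
};

static void put_object(struct object *obj);	/* drops a pin, frees at zero */

/* called with c->lock held; returns with it dropped, like the helper above */
static struct object *lookup_and_lock_shared(struct container *c)
{
	struct object *obj = c->ptr;	/* stable only while c->lock is held */

	obj->refcount++;		/* temporary pin */
	mutex_unlock(&c->lock);		/* ->ptr may now change or be reused */

	down_read(&obj->own_lock);
	if (!obj->alive) {		/* re-check liveness under its own lock */
		up_read(&obj->own_lock);
		put_object(obj);	/* drop the pin; may free obj */
		return NULL;
	}
	put_object(obj);		/* own_lock now keeps obj alive */
	return obj;			/* caller does up_read() when done */
}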
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 2f5ead88d00b..2e126d72d619 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -224,7 +224,7 @@ got_it:
memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = sysv_handle_dirsync(dir);
out_page:
@@ -249,7 +249,7 @@ int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
}
de->inode = 0;
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
return sysv_handle_dirsync(inode);
}
@@ -346,7 +346,7 @@ int sysv_set_link(struct sysv_dir_entry *de, struct page *page,
}
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
dir_commit_chunk(page, pos, SYSV_DIRSIZE);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
return sysv_handle_dirsync(inode);
}
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index 6719da5889d9..269df6d49815 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -165,7 +165,7 @@ struct inode * sysv_new_inode(const struct inode * dir, umode_t mode)
dirty_sb(sb);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_ino = fs16_to_cpu(sbi, ino);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_blocks = 0;
memset(SYSV_I(inode)->i_data, 0, sizeof(SYSV_I(inode)->i_data));
SYSV_I(inode)->i_dir_start_lookup = 0;
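simple_inode_init_ts() is the new libfs helper these inode-allocation paths switch to; it replaces the open-coded "i_mtime = i_atime = inode_set_ctime_current(inode)" triple. A sketch of what it presumably does, built from the accessors above (the canonical body lives in fs/libfs.c):

struct timespec64 simple_inode_init_ts(struct inode *inode)
{
	struct timespec64 ts = inode_set_ctime_current(inode);

	inode_set_atime_to_ts(inode, ts);
	inode_set_mtime_to_ts(inode, ts);
	return ts;
}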
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 0aa3827d8178..5a915b2e68f5 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -200,11 +200,9 @@ struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
i_gid_write(inode, (gid_t)fs16_to_cpu(sbi, raw_inode->i_gid));
set_nlink(inode, fs16_to_cpu(sbi, raw_inode->i_nlink));
inode->i_size = fs32_to_cpu(sbi, raw_inode->i_size);
- inode->i_atime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_atime);
- inode->i_mtime.tv_sec = fs32_to_cpu(sbi, raw_inode->i_mtime);
+ inode_set_atime(inode, fs32_to_cpu(sbi, raw_inode->i_atime), 0);
+ inode_set_mtime(inode, fs32_to_cpu(sbi, raw_inode->i_mtime), 0);
inode_set_ctime(inode, fs32_to_cpu(sbi, raw_inode->i_ctime), 0);
- inode->i_atime.tv_nsec = 0;
- inode->i_mtime.tv_nsec = 0;
inode->i_blocks = 0;
si = SYSV_I(inode);
@@ -253,9 +251,9 @@ static int __sysv_write_inode(struct inode *inode, int wait)
raw_inode->i_gid = cpu_to_fs16(sbi, fs_high2lowgid(i_gid_read(inode)));
raw_inode->i_nlink = cpu_to_fs16(sbi, inode->i_nlink);
raw_inode->i_size = cpu_to_fs32(sbi, inode->i_size);
- raw_inode->i_atime = cpu_to_fs32(sbi, inode->i_atime.tv_sec);
- raw_inode->i_mtime = cpu_to_fs32(sbi, inode->i_mtime.tv_sec);
- raw_inode->i_ctime = cpu_to_fs32(sbi, inode_get_ctime(inode).tv_sec);
+ raw_inode->i_atime = cpu_to_fs32(sbi, inode_get_atime_sec(inode));
+ raw_inode->i_mtime = cpu_to_fs32(sbi, inode_get_mtime_sec(inode));
+ raw_inode->i_ctime = cpu_to_fs32(sbi, inode_get_ctime_sec(inode));
si = SYSV_I(inode);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index edb94e55de8e..725981474e5f 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -423,7 +423,7 @@ do_indirects:
}
n++;
}
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (IS_SYNC(inode))
sysv_sync_inode (inode);
else
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 891653ba9cf3..429603d865a9 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -152,7 +152,7 @@ struct inode *tracefs_get_inode(struct super_block *sb)
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
return inode;
}
diff --git a/fs/ubifs/crypto.c b/fs/ubifs/crypto.c
index 3125e76376ee..921f9033d0d2 100644
--- a/fs/ubifs/crypto.c
+++ b/fs/ubifs/crypto.c
@@ -88,8 +88,7 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
}
const struct fscrypt_operations ubifs_crypt_operations = {
- .flags = FS_CFLG_OWN_PAGES,
- .key_prefix = "ubifs:",
+ .legacy_key_prefix = "ubifs:",
.get_context = ubifs_crypt_get_context,
.set_context = ubifs_crypt_set_context,
.empty_dir = ubifs_crypt_empty_dir,
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index eef9e527d9ff..d013c5b3f1ed 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -237,14 +237,14 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
pr_err("\tuid %u\n", (unsigned int)i_uid_read(inode));
pr_err("\tgid %u\n", (unsigned int)i_gid_read(inode));
pr_err("\tatime %u.%u\n",
- (unsigned int)inode->i_atime.tv_sec,
- (unsigned int)inode->i_atime.tv_nsec);
+ (unsigned int) inode_get_atime_sec(inode),
+ (unsigned int) inode_get_atime_nsec(inode));
pr_err("\tmtime %u.%u\n",
- (unsigned int)inode->i_mtime.tv_sec,
- (unsigned int)inode->i_mtime.tv_nsec);
+ (unsigned int) inode_get_mtime_sec(inode),
+ (unsigned int) inode_get_mtime_nsec(inode));
pr_err("\tctime %u.%u\n",
- (unsigned int) inode_get_ctime(inode).tv_sec,
- (unsigned int) inode_get_ctime(inode).tv_nsec);
+ (unsigned int) inode_get_ctime_sec(inode),
+ (unsigned int) inode_get_ctime_nsec(inode));
pr_err("\tcreat_sqnum %llu\n", ui->creat_sqnum);
pr_err("\txattr_size %u\n", ui->xattr_size);
pr_err("\txattr_cnt %u\n", ui->xattr_cnt);
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 2f48c58d47cd..7af442de44c3 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -96,7 +96,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir,
inode->i_flags |= S_NOCMTIME;
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_mapping->nrpages = 0;
if (!is_xattr) {
@@ -324,7 +324,8 @@ static int ubifs_create(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -767,7 +768,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
inode_set_ctime_current(inode);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -841,7 +843,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
drop_nlink(inode);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
if (err)
goto out_cancel;
@@ -944,7 +947,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
drop_nlink(dir);
dir->i_size -= sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 1, 0);
if (err)
goto out_cancel;
@@ -1018,7 +1022,8 @@ static int ubifs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
inc_nlink(dir);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err) {
ubifs_err(c, "cannot create directory, error %d", err);
@@ -1109,7 +1114,8 @@ static int ubifs_mknod(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
@@ -1209,7 +1215,8 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
mutex_lock(&dir_ui->ui_mutex);
dir->i_size += sz_change;
dir_ui->ui_size = dir->i_size;
- dir->i_mtime = inode_set_ctime_to_ts(dir, inode_get_ctime(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_get_ctime(inode)));
err = ubifs_jnl_update(c, dir, &nm, inode, 0, 0);
if (err)
goto out_cancel;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index e5382f0b2587..2e65fd2dbdc3 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1088,9 +1088,9 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
if (attr->ia_valid & ATTR_ATIME)
- inode->i_atime = attr->ia_atime;
+ inode_set_atime_to_ts(inode, attr->ia_atime);
if (attr->ia_valid & ATTR_MTIME)
- inode->i_mtime = attr->ia_mtime;
+ inode_set_mtime_to_ts(inode, attr->ia_mtime);
if (attr->ia_valid & ATTR_CTIME)
inode_set_ctime_to_ts(inode, attr->ia_ctime);
if (attr->ia_valid & ATTR_MODE) {
@@ -1192,7 +1192,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
ui->ui_size = inode->i_size;
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
/* Other attributes may be changed at the same time as well */
do_attr_changes(inode, attr);
err = ubifs_jnl_truncate(c, inode, old_size, new_size);
@@ -1239,7 +1239,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
mutex_lock(&ui->ui_mutex);
if (attr->ia_valid & ATTR_SIZE) {
/* Truncation changes inode [mc]time */
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
/* 'truncate_setsize()' changed @i_size, update @ui_size */
ui->ui_size = inode->i_size;
}
@@ -1365,9 +1365,9 @@ static inline int mctime_update_needed(const struct inode *inode,
const struct timespec64 *now)
{
struct timespec64 ctime = inode_get_ctime(inode);
+ struct timespec64 mtime = inode_get_mtime(inode);
- if (!timespec64_equal(&inode->i_mtime, now) ||
- !timespec64_equal(&ctime, now))
+ if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
return 1;
return 0;
}
@@ -1429,7 +1429,7 @@ static int update_mctime(struct inode *inode)
return err;
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
struct ubifs_inode *ui = ubifs_inode(inode);
mutex_lock(&ui->ui_mutex);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
release = ui->dirty;
mark_inode_dirty_sync(inode);
mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index ffc9beee7be6..d69d2154645b 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -452,12 +452,12 @@ static void pack_inode(struct ubifs_info *c, struct ubifs_ino_node *ino,
ino->ch.node_type = UBIFS_INO_NODE;
ino_key_init_flash(c, &ino->key, inode->i_ino);
ino->creat_sqnum = cpu_to_le64(ui->creat_sqnum);
- ino->atime_sec = cpu_to_le64(inode->i_atime.tv_sec);
- ino->atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
- ino->ctime_sec = cpu_to_le64(inode_get_ctime(inode).tv_sec);
- ino->ctime_nsec = cpu_to_le32(inode_get_ctime(inode).tv_nsec);
- ino->mtime_sec = cpu_to_le64(inode->i_mtime.tv_sec);
- ino->mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ ino->atime_sec = cpu_to_le64(inode_get_atime_sec(inode));
+ ino->atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
+ ino->ctime_sec = cpu_to_le64(inode_get_ctime_sec(inode));
+ ino->ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ino->mtime_sec = cpu_to_le64(inode_get_mtime_sec(inode));
+ ino->mtime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
ino->uid = cpu_to_le32(i_uid_read(inode));
ino->gid = cpu_to_le32(i_gid_read(inode));
ino->mode = cpu_to_le32(inode->i_mode);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b08fb28d16b5..366941d4a18a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -142,10 +142,10 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
set_nlink(inode, le32_to_cpu(ino->nlink));
i_uid_write(inode, le32_to_cpu(ino->uid));
i_gid_write(inode, le32_to_cpu(ino->gid));
- inode->i_atime.tv_sec = (int64_t)le64_to_cpu(ino->atime_sec);
- inode->i_atime.tv_nsec = le32_to_cpu(ino->atime_nsec);
- inode->i_mtime.tv_sec = (int64_t)le64_to_cpu(ino->mtime_sec);
- inode->i_mtime.tv_nsec = le32_to_cpu(ino->mtime_nsec);
+ inode_set_atime(inode, (int64_t)le64_to_cpu(ino->atime_sec),
+ le32_to_cpu(ino->atime_nsec));
+ inode_set_mtime(inode, (int64_t)le64_to_cpu(ino->mtime_sec),
+ le32_to_cpu(ino->mtime_nsec));
inode_set_ctime(inode, (int64_t)le64_to_cpu(ino->ctime_sec),
le32_to_cpu(ino->ctime_nsec));
inode->i_mode = le32_to_cpu(ino->mode);
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index ebb3ad6b5e7e..62633816d7d0 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -2043,7 +2043,7 @@ ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf,
size_t size);
#ifdef CONFIG_UBIFS_FS_XATTR
-extern const struct xattr_handler *ubifs_xattr_handlers[];
+extern const struct xattr_handler * const ubifs_xattr_handlers[];
ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size);
void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum);
int ubifs_purge_xattrs(struct inode *host);
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 406c82eab513..0847db521984 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -735,7 +735,7 @@ static const struct xattr_handler ubifs_security_xattr_handler = {
};
#endif
-const struct xattr_handler *ubifs_xattr_handlers[] = {
+const struct xattr_handler * const ubifs_xattr_handlers[] = {
&ubifs_user_xattr_handler,
&ubifs_trusted_xattr_handler,
#ifdef CONFIG_UBIFS_FS_SECURITY
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 6b558cbbeb6b..5f1f969f4134 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -100,8 +100,8 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
- iinfo->i_crtime = inode->i_mtime;
+ simple_inode_init_ts(inode);
+ iinfo->i_crtime = inode_get_mtime(inode);
if (unlikely(insert_inode_locked(inode) < 0)) {
make_bad_inode(inode);
iput(inode);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index a17a6184cc39..d8493449d4c5 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1296,7 +1296,7 @@ set_size:
goto out_unlock;
}
update_time:
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
@@ -1327,7 +1327,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
uint32_t uid, gid;
- struct timespec64 ctime;
+ struct timespec64 ts;
reread:
if (iloc->partitionReferenceNum >= sbi->s_partitions) {
@@ -1504,10 +1504,12 @@ reread:
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
- udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
- udf_disk_stamp_to_time(&ctime, fe->attrTime);
- inode_set_ctime_to_ts(inode, ctime);
+ udf_disk_stamp_to_time(&ts, fe->accessTime);
+ inode_set_atime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, fe->modificationTime);
+ inode_set_mtime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, fe->attrTime);
+ inode_set_ctime_to_ts(inode, ts);
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
@@ -1519,11 +1521,13 @@ reread:
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
- udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
- udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
+ udf_disk_stamp_to_time(&ts, efe->accessTime);
+ inode_set_atime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, efe->modificationTime);
+ inode_set_mtime_to_ts(inode, ts);
+ udf_disk_stamp_to_time(&ts, efe->attrTime);
+ inode_set_ctime_to_ts(inode, ts);
udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
- udf_disk_stamp_to_time(&ctime, efe->attrTime);
- inode_set_ctime_to_ts(inode, ctime);
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
@@ -1798,8 +1802,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
- udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
- udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&fe->accessTime, inode_get_atime(inode));
+ udf_time_to_disk_stamp(&fe->modificationTime, inode_get_mtime(inode));
udf_time_to_disk_stamp(&fe->attrTime, inode_get_ctime(inode));
memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
@@ -1829,12 +1833,14 @@ static int udf_update_inode(struct inode *inode, int do_sync)
cpu_to_le32(inode->i_sb->s_blocksize);
}
- udf_adjust_time(iinfo, inode->i_atime);
- udf_adjust_time(iinfo, inode->i_mtime);
+ udf_adjust_time(iinfo, inode_get_atime(inode));
+ udf_adjust_time(iinfo, inode_get_mtime(inode));
udf_adjust_time(iinfo, inode_get_ctime(inode));
- udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
- udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
+ udf_time_to_disk_stamp(&efe->accessTime,
+ inode_get_atime(inode));
+ udf_time_to_disk_stamp(&efe->modificationTime,
+ inode_get_mtime(inode));
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
udf_time_to_disk_stamp(&efe->attrTime, inode_get_ctime(inode));
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index ae55ab8859b6..3508ac484da3 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -365,7 +365,7 @@ static int udf_add_nondir(struct dentry *dentry, struct inode *inode)
*(__le32 *)((struct allocDescImpUse *)iter.fi.icb.impUse)->impUse =
cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL);
udf_fiiter_write_fi(&iter, NULL);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, false, 1);
@@ -471,7 +471,7 @@ static int udf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
udf_fiiter_release(&iter);
udf_add_fid_counter(dir->i_sb, true, 1);
inc_nlink(dir);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
d_instantiate_new(dentry, inode);
@@ -523,8 +523,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_size = 0;
inode_dec_link_count(dir);
udf_add_fid_counter(dir->i_sb, true, -1);
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
mark_inode_dirty(dir);
ret = 0;
end_rmdir:
@@ -555,7 +555,7 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
set_nlink(inode, 1);
}
udf_fiiter_delete_entry(&iter);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
inode_dec_link_count(inode);
udf_add_fid_counter(dir->i_sb, false, -1);
@@ -748,7 +748,7 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
udf_add_fid_counter(dir->i_sb, false, 1);
inode_set_ctime_current(inode);
mark_inode_dirty(inode);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ihold(inode);
d_instantiate(dentry, inode);
@@ -866,8 +866,8 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
udf_add_fid_counter(old_dir->i_sb, S_ISDIR(new_inode->i_mode),
-1);
}
- old_dir->i_mtime = inode_set_ctime_current(old_dir);
- new_dir->i_mtime = inode_set_ctime_current(new_dir);
+ inode_set_mtime_to_ts(old_dir, inode_set_ctime_current(old_dir));
+ inode_set_mtime_to_ts(new_dir, inode_set_ctime_current(new_dir));
mark_inode_dirty(old_dir);
mark_inode_dirty(new_dir);
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index fd57f03b6c93..27c85d92d1dc 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -107,7 +107,7 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
ufs_commit_chunk(page, pos, len);
ufs_put_page(page);
if (update_times)
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
ufs_handle_dirsync(dir);
}
@@ -397,7 +397,7 @@ got_it:
ufs_set_de_type(sb, de, inode->i_mode);
ufs_commit_chunk(page, pos, rec_len);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
mark_inode_dirty(dir);
err = ufs_handle_dirsync(dir);
@@ -539,7 +539,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0;
ufs_commit_chunk(page, pos, to - from);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
err = ufs_handle_dirsync(inode);
out:
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index a1e7bd9d1f98..73531827ecee 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -292,7 +292,7 @@ cg_found:
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
inode->i_blocks = 0;
inode->i_generation = 0;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
ufsi->i_flags = UFS_I(dir)->i_flags;
ufsi->i_lastfrag = 0;
ufsi->i_shadow = 0;
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 21a4779a2de5..338e4b97312f 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -579,13 +579,15 @@ static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));
inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
- inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
+ inode_set_atime(inode,
+ (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec),
+ 0);
inode_set_ctime(inode,
(signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec),
0);
- inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
- inode->i_mtime.tv_nsec = 0;
- inode->i_atime.tv_nsec = 0;
+ inode_set_mtime(inode,
+ (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec),
+ 0);
inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
@@ -626,12 +628,12 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));
inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
- inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
+ inode_set_atime(inode, fs64_to_cpu(sb, ufs2_inode->ui_atime),
+ fs32_to_cpu(sb, ufs2_inode->ui_atimensec));
inode_set_ctime(inode, fs64_to_cpu(sb, ufs2_inode->ui_ctime),
fs32_to_cpu(sb, ufs2_inode->ui_ctimensec));
- inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
- inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
- inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
+ inode_set_mtime(inode, fs64_to_cpu(sb, ufs2_inode->ui_mtime),
+ fs32_to_cpu(sb, ufs2_inode->ui_mtimensec));
inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
@@ -725,12 +727,14 @@ static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
- ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
+ ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb,
+ inode_get_atime_sec(inode));
ufs_inode->ui_atime.tv_usec = 0;
ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb,
- inode_get_ctime(inode).tv_sec);
+ inode_get_ctime_sec(inode));
ufs_inode->ui_ctime.tv_usec = 0;
- ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
+ ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb,
+ inode_get_mtime_sec(inode));
ufs_inode->ui_mtime.tv_usec = 0;
ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
@@ -770,13 +774,15 @@ static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));
ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
- ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
- ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
- ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime(inode).tv_sec);
+ ufs_inode->ui_atime = cpu_to_fs64(sb, inode_get_atime_sec(inode));
+ ufs_inode->ui_atimensec = cpu_to_fs32(sb,
+ inode_get_atime_nsec(inode));
+ ufs_inode->ui_ctime = cpu_to_fs64(sb, inode_get_ctime_sec(inode));
ufs_inode->ui_ctimensec = cpu_to_fs32(sb,
- inode_get_ctime(inode).tv_nsec);
- ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
- ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);
+ inode_get_ctime_nsec(inode));
+ ufs_inode->ui_mtime = cpu_to_fs64(sb, inode_get_mtime_sec(inode));
+ ufs_inode->ui_mtimensec = cpu_to_fs32(sb,
+ inode_get_mtime_nsec(inode));
ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
@@ -1208,7 +1214,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
truncate_setsize(inode, size);
ufs_truncate_blocks(inode);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
mark_inode_dirty(inode);
out:
UFSD("EXIT: err %d\n", err);
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
index 83f20dd15522..72ac9320e6a3 100644
--- a/fs/vboxsf/utils.c
+++ b/fs/vboxsf/utils.c
@@ -126,12 +126,12 @@ int vboxsf_init_inode(struct vboxsf_sbi *sbi, struct inode *inode,
do_div(allocated, 512);
inode->i_blocks = allocated;
- inode->i_atime = ns_to_timespec64(
- info->access_time.ns_relative_to_unix_epoch);
+ inode_set_atime_to_ts(inode,
+ ns_to_timespec64(info->access_time.ns_relative_to_unix_epoch));
inode_set_ctime_to_ts(inode,
ns_to_timespec64(info->change_time.ns_relative_to_unix_epoch));
- inode->i_mtime = ns_to_timespec64(
- info->modification_time.ns_relative_to_unix_epoch);
+ inode_set_mtime_to_ts(inode,
+ ns_to_timespec64(info->modification_time.ns_relative_to_unix_epoch));
return 0;
}
@@ -194,7 +194,7 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
struct vboxsf_sbi *sbi;
struct vboxsf_inode *sf_i;
struct shfl_fsobjinfo info;
- struct timespec64 prev_mtime;
+ struct timespec64 mtime, prev_mtime;
struct inode *inode;
int err;
@@ -202,7 +202,7 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
return -EINVAL;
inode = d_inode(dentry);
- prev_mtime = inode->i_mtime;
+ prev_mtime = inode_get_mtime(inode);
sf_i = VBOXSF_I(inode);
sbi = VBOXSF_SBI(dentry->d_sb);
if (!sf_i->force_restat) {
@@ -225,7 +225,8 @@ int vboxsf_inode_revalidate(struct dentry *dentry)
* page-cache for it. Note this also gets triggered by our own writes,
* this is unavoidable.
*/
- if (timespec64_compare(&inode->i_mtime, &prev_mtime) > 0)
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&mtime, &prev_mtime) > 0)
invalidate_inode_pages2(inode->i_mapping);
return 0;
diff --git a/fs/xattr.c b/fs/xattr.c
index efd4736bc94b..09d927603433 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -56,7 +56,7 @@ strcmp_prefix(const char *a, const char *a_prefix)
static const struct xattr_handler *
xattr_resolve_name(struct inode *inode, const char **name)
{
- const struct xattr_handler **handlers = inode->i_sb->s_xattr;
+ const struct xattr_handler * const *handlers = inode->i_sb->s_xattr;
const struct xattr_handler *handler;
if (!(inode->i_opflags & IOP_XATTR)) {
@@ -162,7 +162,7 @@ xattr_permission(struct mnt_idmap *idmap, struct inode *inode,
int
xattr_supports_user_prefix(struct inode *inode)
{
- const struct xattr_handler **handlers = inode->i_sb->s_xattr;
+ const struct xattr_handler * const *handlers = inode->i_sb->s_xattr;
const struct xattr_handler *handler;
if (!(inode->i_opflags & IOP_XATTR)) {
@@ -999,7 +999,7 @@ int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name)
ssize_t
generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
- const struct xattr_handler *handler, **handlers = dentry->d_sb->s_xattr;
+ const struct xattr_handler *handler, * const *handlers = dentry->d_sb->s_xattr;
ssize_t remaining_size = buffer_size;
int err = 0;
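With xattr_resolve_name(), xattr_supports_user_prefix(), and generic_listxattr() now iterating a "const struct xattr_handler * const *", the per-filesystem tables converted throughout this section can be fully const and live in .rodata. A minimal sketch of a constified table (names are hypothetical; the handler itself would be defined in the filesystem's xattr code):

/* hypothetical handler, defined elsewhere in the filesystem */
extern const struct xattr_handler example_user_xattr_handler;

/* note the second const: the array of pointers is immutable too */
const struct xattr_handler * const example_xattr_handlers[] = {
	&example_user_xattr_handler,
	NULL,	/* tables are NULL-terminated */
};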
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index a35781577cad..543f3748c2a3 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -220,8 +220,10 @@ xfs_inode_from_disk(
* a time before epoch is converted to a time long after epoch
* on 64 bit systems.
*/
- inode->i_atime = xfs_inode_from_disk_ts(from, from->di_atime);
- inode->i_mtime = xfs_inode_from_disk_ts(from, from->di_mtime);
+ inode_set_atime_to_ts(inode,
+ xfs_inode_from_disk_ts(from, from->di_atime));
+ inode_set_mtime_to_ts(inode,
+ xfs_inode_from_disk_ts(from, from->di_mtime));
inode_set_ctime_to_ts(inode,
xfs_inode_from_disk_ts(from, from->di_ctime));
@@ -315,8 +317,8 @@ xfs_inode_to_disk(
to->di_projid_lo = cpu_to_be16(ip->i_projid & 0xffff);
to->di_projid_hi = cpu_to_be16(ip->i_projid >> 16);
- to->di_atime = xfs_inode_to_disk_ts(ip, inode->i_atime);
- to->di_mtime = xfs_inode_to_disk_ts(ip, inode->i_mtime);
+ to->di_atime = xfs_inode_to_disk_ts(ip, inode_get_atime(inode));
+ to->di_mtime = xfs_inode_to_disk_ts(ip, inode_get_mtime(inode));
to->di_ctime = xfs_inode_to_disk_ts(ip, inode_get_ctime(inode));
to->di_nlink = cpu_to_be32(inode->i_nlink);
to->di_gen = cpu_to_be32(inode->i_generation);
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index fa180ab66b73..396648acb5be 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -970,6 +970,7 @@ xfs_rtfree_extent(
xfs_mount_t *mp; /* file system mount structure */
xfs_fsblock_t sb; /* summary file block number */
struct xfs_buf *sumbp = NULL; /* summary file block buffer */
+ struct timespec64 atime;
mp = tp->t_mountp;
@@ -999,7 +1000,10 @@ xfs_rtfree_extent(
mp->m_sb.sb_rextents) {
if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM))
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
- *(uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
+
+ atime = inode_get_atime(VFS_I(mp->m_rbmip));
+ *((uint64_t *)&atime) = 0;
+ inode_set_atime_to_ts(VFS_I(mp->m_rbmip), atime);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
}
return 0;
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 6b2296ff248a..70e97ea6eee7 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -65,7 +65,7 @@ xfs_trans_ichgtime(
tv = current_time(inode);
if (flags & XFS_ICHGTIME_MOD)
- inode->i_mtime = tv;
+ inode_set_mtime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CHG)
inode_set_ctime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CREATE)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index fcefab687285..40e0a1f1f753 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1644,7 +1644,7 @@ xfs_swap_extents(
uint64_t f;
int resblks = 0;
unsigned int flags = 0;
- struct timespec64 ctime;
+ struct timespec64 ctime, mtime;
/*
* Lock the inodes against other IO, page faults and truncate to
@@ -1758,10 +1758,11 @@ xfs_swap_extents(
* under it.
*/
ctime = inode_get_ctime(VFS_I(ip));
+ mtime = inode_get_mtime(VFS_I(ip));
if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
(sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
- (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
- (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
+ (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
+ (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
error = -EBUSY;
goto out_trans_cancel;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c1ece4a08ff4..003e157241da 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1945,8 +1945,6 @@ void
xfs_free_buftarg(
struct xfs_buftarg *btp)
{
- struct block_device *bdev = btp->bt_bdev;
-
unregister_shrinker(&btp->bt_shrinker);
ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
percpu_counter_destroy(&btp->bt_io_count);
@@ -1954,8 +1952,8 @@ xfs_free_buftarg(
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
- if (bdev != btp->bt_mount->m_super->s_bdev)
- blkdev_put(bdev, btp->bt_mount->m_super);
+ if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
+ bdev_release(btp->bt_bdev_handle);
kmem_free(btp);
}
@@ -1990,16 +1988,15 @@ xfs_setsize_buftarg(
*/
STATIC int
xfs_setsize_buftarg_early(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
+ xfs_buftarg_t *btp)
{
- return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
+ return xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev));
}
struct xfs_buftarg *
xfs_alloc_buftarg(
struct xfs_mount *mp,
- struct block_device *bdev)
+ struct bdev_handle *bdev_handle)
{
xfs_buftarg_t *btp;
const struct dax_holder_operations *ops = NULL;
@@ -2010,9 +2007,10 @@ xfs_alloc_buftarg(
btp = kmem_zalloc(sizeof(*btp), KM_NOFS);
btp->bt_mount = mp;
- btp->bt_dev = bdev->bd_dev;
- btp->bt_bdev = bdev;
- btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off,
+ btp->bt_bdev_handle = bdev_handle;
+ btp->bt_dev = bdev_handle->bdev->bd_dev;
+ btp->bt_bdev = bdev_handle->bdev;
+ btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
mp, ops);
/*
@@ -2022,7 +2020,7 @@ xfs_alloc_buftarg(
ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
DEFAULT_RATELIMIT_BURST);
- if (xfs_setsize_buftarg_early(btp, bdev))
+ if (xfs_setsize_buftarg_early(btp))
goto error_free;
if (list_lru_init(&btp->bt_lru))
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index df8f47953bb4..ada9d310b7d3 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -98,6 +98,7 @@ typedef unsigned int xfs_buf_flags_t;
*/
typedef struct xfs_buftarg {
dev_t bt_dev;
+ struct bdev_handle *bt_bdev_handle;
struct block_device *bt_bdev;
struct dax_device *bt_daxdev;
u64 bt_dax_part_off;
@@ -364,7 +365,7 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
* Handling of buftargs.
*/
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
- struct block_device *bdev);
+ struct bdev_handle *bdev_handle);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 4d55f58d99b7..36f5cf802c07 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -844,8 +844,8 @@ xfs_init_new_inode(
ASSERT(ip->i_nblocks == 0);
tv = inode_set_ctime_current(inode);
- inode->i_mtime = tv;
- inode->i_atime = tv;
+ inode_set_mtime_to_ts(inode, tv);
+ inode_set_atime_to_ts(inode, tv);
ip->i_extsize = 0;
ip->i_diflags = 0;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 127b2410eb20..17c51804f9c6 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -526,8 +526,8 @@ xfs_inode_to_log_dinode(
to->di_projid_hi = ip->i_projid >> 16;
memset(to->di_pad3, 0, sizeof(to->di_pad3));
- to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode->i_atime);
- to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode->i_mtime);
+ to->di_atime = xfs_inode_to_log_dinode_ts(ip, inode_get_atime(inode));
+ to->di_mtime = xfs_inode_to_log_dinode_ts(ip, inode_get_mtime(inode));
to->di_ctime = xfs_inode_to_log_dinode_ts(ip, inode_get_ctime(inode));
to->di_nlink = inode->i_nlink;
to->di_gen = inode->i_generation;
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 2b3b05c28e9e..fdfda4fba12b 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -572,8 +572,8 @@ xfs_vn_getattr(
stat->uid = vfsuid_into_kuid(vfsuid);
stat->gid = vfsgid_into_kgid(vfsgid);
stat->ino = ip->i_ino;
- stat->atime = inode->i_atime;
- stat->mtime = inode->i_mtime;
+ stat->atime = inode_get_atime(inode);
+ stat->mtime = inode_get_mtime(inode);
stat->ctime = inode_get_ctime(inode);
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
@@ -1067,9 +1067,9 @@ xfs_vn_update_time(
now = current_time(inode);
if (flags & S_MTIME)
- inode->i_mtime = now;
+ inode_set_mtime_to_ts(inode, now);
if (flags & S_ATIME)
- inode->i_atime = now;
+ inode_set_atime_to_ts(inode, now);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, log_flags);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f5377ba5967a..14462614fcc8 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -107,12 +107,12 @@ xfs_bulkstat_one_int(
buf->bs_size = ip->i_disk_size;
buf->bs_nlink = inode->i_nlink;
- buf->bs_atime = inode->i_atime.tv_sec;
- buf->bs_atime_nsec = inode->i_atime.tv_nsec;
- buf->bs_mtime = inode->i_mtime.tv_sec;
- buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
- buf->bs_ctime = inode_get_ctime(inode).tv_sec;
- buf->bs_ctime_nsec = inode_get_ctime(inode).tv_nsec;
+ buf->bs_atime = inode_get_atime_sec(inode);
+ buf->bs_atime_nsec = inode_get_atime_nsec(inode);
+ buf->bs_mtime = inode_get_mtime_sec(inode);
+ buf->bs_mtime_nsec = inode_get_mtime_nsec(inode);
+ buf->bs_ctime = inode_get_ctime_sec(inode);
+ buf->bs_ctime_nsec = inode_get_ctime_nsec(inode);
buf->bs_gen = inode->i_generation;
buf->bs_mode = inode->i_mode;
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 16534e9873f6..2e1a4e5cd03d 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1420,25 +1420,26 @@ xfs_rtunmount_inodes(
*/
int /* error */
xfs_rtpick_extent(
- xfs_mount_t *mp, /* file system mount point */
- xfs_trans_t *tp, /* transaction pointer */
- xfs_extlen_t len, /* allocation length (rtextents) */
- xfs_rtblock_t *pick) /* result rt extent */
-{
- xfs_rtblock_t b; /* result block */
- int log2; /* log of sequence number */
- uint64_t resid; /* residual after log removed */
- uint64_t seq; /* sequence number of file creation */
- uint64_t *seqp; /* pointer to seqno in inode */
+ xfs_mount_t *mp, /* file system mount point */
+ xfs_trans_t *tp, /* transaction pointer */
+ xfs_extlen_t len, /* allocation length (rtextents) */
+ xfs_rtblock_t *pick) /* result rt extent */
+{
+ xfs_rtblock_t b; /* result block */
+ int log2; /* log of sequence number */
+ uint64_t resid; /* residual after log removed */
+ uint64_t seq; /* sequence number of file creation */
+ struct timespec64 ts; /* temporary timespec64 storage */
ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
- seqp = (uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
- *seqp = 0;
+ seq = 0;
+ } else {
+ ts = inode_get_atime(VFS_I(mp->m_rbmip));
+ seq = (uint64_t)ts.tv_sec;
}
- seq = *seqp;
if ((log2 = xfs_highbit64(seq)) == -1)
b = 0;
else {
@@ -1450,7 +1451,8 @@ xfs_rtpick_extent(
if (b + len > mp->m_sb.sb_rextents)
b = mp->m_sb.sb_rextents - len;
}
- *seqp = seq + 1;
+ ts.tv_sec = (time64_t)seq + 1;
+ inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);
xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
*pick = b;
return 0;
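XFS keeps the realtime allocator's rotor sequence number in the realtime bitmap inode's atime. The old code got at it by casting &i_atime to a uint64_t pointer, which the accessor conversion forbids, so xfs_rtfree_extent() and xfs_rtpick_extent() now round-trip through a local timespec64. A hedged distillation of the read-bump-write in the hunks above:

struct timespec64 ts = inode_get_atime(VFS_I(mp->m_rbmip));
uint64_t seq = (uint64_t)ts.tv_sec;	/* seqno lives in tv_sec now */

ts.tv_sec = (time64_t)seq + 1;		/* bump the sequence number ... */
inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);	/* ... and store it back */

Note that the new code keys only off tv_sec, where the old cast aliased whatever 64 bits sat at the start of the field.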
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 819a3568b28f..f0ae07828153 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -361,14 +361,15 @@ STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
const char *name,
- struct block_device **bdevp)
+ struct bdev_handle **handlep)
{
int error = 0;
- *bdevp = blkdev_get_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
- mp->m_super, &fs_holder_ops);
- if (IS_ERR(*bdevp)) {
- error = PTR_ERR(*bdevp);
+ *handlep = bdev_open_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE,
+ mp->m_super, &fs_holder_ops);
+ if (IS_ERR(*handlep)) {
+ error = PTR_ERR(*handlep);
+ *handlep = NULL;
xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
}
@@ -433,7 +434,7 @@ xfs_open_devices(
{
struct super_block *sb = mp->m_super;
struct block_device *ddev = sb->s_bdev;
- struct block_device *logdev = NULL, *rtdev = NULL;
+ struct bdev_handle *logdev_handle = NULL, *rtdev_handle = NULL;
int error;
/*
@@ -446,17 +447,19 @@ xfs_open_devices(
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
- error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
+ error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
if (error)
goto out_relock;
}
if (mp->m_rtname) {
- error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
+ error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
if (error)
goto out_close_logdev;
- if (rtdev == ddev || rtdev == logdev) {
+ if (rtdev_handle->bdev == ddev ||
+ (logdev_handle &&
+ rtdev_handle->bdev == logdev_handle->bdev)) {
xfs_warn(mp,
"Cannot mount filesystem with identical rtdev and ddev/logdev.");
error = -EINVAL;
@@ -468,22 +471,25 @@ xfs_open_devices(
* Setup xfs_mount buffer target pointers
*/
error = -ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
+ mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
- if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
+ if (rtdev_handle) {
+ mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
- if (logdev && logdev != ddev) {
- mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
+ if (logdev_handle && logdev_handle->bdev != ddev) {
+ mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
mp->m_logdev_targp = mp->m_ddev_targp;
+ /* Handle won't be used, drop it */
+ if (logdev_handle)
+ bdev_release(logdev_handle);
}
error = 0;
@@ -497,11 +503,11 @@ out_relock:
out_free_ddev_targ:
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
- if (rtdev)
- blkdev_put(rtdev, sb);
+ if (rtdev_handle)
+ bdev_release(rtdev_handle);
out_close_logdev:
- if (logdev && logdev != ddev)
- blkdev_put(logdev, sb);
+ if (logdev_handle)
+ bdev_release(logdev_handle);
goto out_relock;
}
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index a3975f325f4e..987843f84d03 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -186,7 +186,7 @@ static const struct xattr_handler xfs_xattr_security_handler = {
.set = xfs_xattr_set,
};
-const struct xattr_handler *xfs_xattr_handlers[] = {
+const struct xattr_handler * const xfs_xattr_handlers[] = {
&xfs_xattr_user_handler,
&xfs_xattr_trusted_handler,
&xfs_xattr_security_handler,
diff --git a/fs/xfs/xfs_xattr.h b/fs/xfs/xfs_xattr.h
index 2b09133b1b9b..cec766cad26c 100644
--- a/fs/xfs/xfs_xattr.h
+++ b/fs/xfs/xfs_xattr.h
@@ -8,6 +8,6 @@
int xfs_attr_change(struct xfs_da_args *args);
-extern const struct xattr_handler *xfs_xattr_handlers[];
+extern const struct xattr_handler * const xfs_xattr_handlers[];
#endif /* __XFS_XATTR_H__ */
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 9d1a9808fbbb..e6a75401677d 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -658,8 +658,8 @@ static struct inode *zonefs_get_file_inode(struct inode *dir,
inode->i_ino = ino;
inode->i_mode = z->z_mode;
- inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
- inode_get_ctime(dir));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, inode_get_ctime(dir))));
inode->i_uid = z->z_uid;
inode->i_gid = z->z_gid;
inode->i_size = z->z_wpoffset;
@@ -695,8 +695,8 @@ static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
inode->i_ino = ino;
inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
- inode->i_mtime = inode->i_atime = inode_set_ctime_to_ts(inode,
- inode_get_ctime(root));
+ inode_set_mtime_to_ts(inode,
+ inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, inode_get_ctime(root))));
inode->i_private = &sbi->s_zgroup[ztype];
set_nlink(inode, 2);
@@ -1319,7 +1319,7 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_ino = bdev_nr_zones(sb->s_bdev);
inode->i_mode = S_IFDIR | 0555;
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_op = &zonefs_dir_inode_operations;
inode->i_fop = &zonefs_dir_operations;
inode->i_size = 2;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index f9544d9b670d..ac65f0626cfc 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -68,8 +68,7 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
- DRM_SCHED_PRIORITY_COUNT,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_COUNT
};
/* Used to chose between FIFO and RR jobs scheduling */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index bb3cb005873e..e748bc957d83 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -82,6 +82,8 @@ struct timer_map {
struct arch_timer_context *emul_ptimer;
};
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
struct arch_timer_cpu {
struct arch_timer_context timers[NR_KVM_TIMERS];
@@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
void kvm_timer_cpu_up(void);
void kvm_timer_cpu_down(void);
+static inline bool has_cntpoff(void)
+{
+ return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index eef450f25982..51fa7ffdee83 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1479,14 +1479,25 @@ extern const struct blk_holder_ops fs_holder_ops;
#define sb_open_mode(flags) \
(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+struct bdev_handle {
+ struct block_device *bdev;
+ void *holder;
+ blk_mode_t mode;
+};
+
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, void *holder);
+void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
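bdev_open_by_dev()/bdev_open_by_path() return a struct bdev_handle instead of a bare block_device, and bdev_release() replaces blkdev_put(); the handle carries the holder and open mode, so callers no longer pass them back at close time. A minimal usage sketch against the declarations above (example_open/example_close are hypothetical; fs_holder_ops is the holder-ops export from this header):

static struct bdev_handle *example_open(dev_t dev, struct super_block *sb)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
				  sb, &fs_holder_ops);
	if (IS_ERR(handle))
		return handle;		/* ERR_PTR(); nothing to release */

	pr_debug("opened %pg\n", handle->bdev);	/* device via the handle */
	return handle;
}

static void example_close(struct bdev_handle *handle)
{
	bdev_release(handle);	/* was: blkdev_put(bdev, holder) */
}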
diff --git a/drivers/md/bcache/closure.h b/include/linux/closure.h
index c88cdc4ae4ec..722a586bb224 100644
--- a/drivers/md/bcache/closure.h
+++ b/include/linux/closure.h
@@ -155,7 +155,7 @@ struct closure {
atomic_t remaining;
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
@@ -172,6 +172,11 @@ void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void __closure_sync(struct closure *cl);
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
/**
 * closure_sync - sleep until a closure has nothing left to wait on
*
@@ -180,19 +185,17 @@ void __closure_sync(struct closure *cl);
*/
static inline void closure_sync(struct closure *cl)
{
- if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+ if (closure_nr_remaining(cl) != 1)
__closure_sync(cl);
}
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
-void closure_debug_init(void);
void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);
#else
-static inline void closure_debug_init(void) {}
static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}
@@ -200,21 +203,21 @@ static inline void closure_debug_destroy(struct closure *cl) {}
static inline void closure_set_ip(struct closure *cl)
{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
cl->ip = _THIS_IP_;
#endif
}
static inline void closure_set_ret_ip(struct closure *cl)
{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
cl->ip = _RET_IP_;
#endif
}
static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
cl->waiting_on = f;
#endif
}
@@ -243,6 +246,7 @@ static inline void closure_queue(struct closure *cl)
*/
BUILD_BUG_ON(offsetof(struct closure, fn)
!= offsetof(struct work_struct, func));
+
if (wq) {
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
@@ -255,7 +259,7 @@ static inline void closure_queue(struct closure *cl)
*/
static inline void closure_get(struct closure *cl)
{
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
BUG_ON((atomic_inc_return(&cl->remaining) &
CLOSURE_REMAINING_MASK) <= 1);
#else
@@ -271,7 +275,7 @@ static inline void closure_get(struct closure *cl)
*/
static inline void closure_init(struct closure *cl, struct closure *parent)
{
- memset(cl, 0, sizeof(struct closure));
+ cl->fn = NULL;
cl->parent = parent;
if (parent)
closure_get(parent);
@@ -375,4 +379,26 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
continue_at_nobarrier(cl, fn, wq);
}
+#define __closure_wait_event(waitlist, _cond) \
+do { \
+ struct closure cl; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) \
+ break; \
+ closure_sync(&cl); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond) \
+do { \
+ if (!(_cond)) \
+ __closure_wait_event(waitlist, _cond); \
+} while (0)
+
#endif /* _LINUX_CLOSURE_H */
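closure_wait_event() above mirrors wait_event(): the caller parks on a waitlist via an on-stack closure until the condition holds. A minimal sketch, with a hypothetical waitlist and flag:

static struct closure_waitlist example_wait;
static atomic_t example_ready;

static void example_consumer(void)
{
	/* Sleeps on an on-stack closure until example_ready is nonzero. */
	closure_wait_event(&example_wait, atomic_read(&example_ready));
}

static void example_producer(void)
{
	atomic_set(&example_ready, 1);
	closure_wake_up(&example_wait);
}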
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6b351e009f59..3da2f0545d5d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -251,6 +251,7 @@ extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
+extern void d_mark_tmpfile(struct file *, struct inode *);
extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 69d0435c7ebb..772ab4d74d94 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -165,6 +165,7 @@ void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct dax_device *dax_dev;
blk_mode_t mode;
char name[16];
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 11fbd0ee1370..0388e8c20f52 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -105,6 +105,12 @@ enum fid_type {
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit inode number, 32 bit subvolume, 32 bit generation number:
+ */
+ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1,
+ FILEID_BCACHEFS_WITH_PARENT = 0xb2,
+
+ /*
* 64 bit unique kernfs id
*/
FILEID_KERNFS = 0xfe,
@@ -224,9 +230,23 @@ struct export_operations {
atomic attribute updates
*/
#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */
unsigned long flags;
};
+/**
+ * exportfs_lock_op_is_async() - export op supports async lock operation
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the nfs export_operations structure has
+ * EXPORT_OP_ASYNC_LOCK set in its flags
+ */
+static inline bool
+exportfs_lock_op_is_async(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent,
int flags);
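A short sketch of how a lock server might consult the new flag before deciding whether a blocking lock request can be handled asynchronously; the inode-based lookup through sb->s_export_op is illustrative:

static bool example_can_async_lock(const struct inode *inode)
{
	const struct export_operations *ops = inode->i_sb->s_export_op;

	return ops && exportfs_lock_op_is_async(ops);
}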
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index e066816f3519..bc4c3287a65e 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -98,20 +98,9 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
-{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "suspicious rcu_dereference_check() usage");
- return files_lookup_fd_raw(files, fd);
-}
-
-static inline struct file *lookup_fd_rcu(unsigned int fd)
-{
- return files_lookup_fd_rcu(current->files, fd);
-}
-
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
+struct file *lookup_fdget_rcu(unsigned int fd);
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
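The replacement helpers hand back a file with a reference already taken (hence the fdget naming), so the old two-step of files_lookup_fd_rcu() plus get_file_rcu() collapses into one call followed by fput(). A hedged usage sketch:

static void example_inspect_fd(unsigned int fd)
{
	struct file *file;

	rcu_read_lock();
	file = lookup_fdget_rcu(fd);	/* reference taken on success */
	rcu_read_unlock();

	if (file) {
		/* ... safe to use file outside the RCU section ... */
		fput(file);
	}
}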
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index e8b12ec8b060..d1ea3898564c 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -100,6 +100,18 @@
#define SD_ITAPDLY 0xFF180314
#define SD_OTAPDLYSEL 0xFF180318
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18)
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19)
+#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
+#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
PM_ACKNOWLEDGE_CB = 31,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b528f063e8ff..bd522c0ac267 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -67,7 +67,7 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
@@ -671,8 +671,8 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 i_atime;
- struct timespec64 i_mtime;
+ struct timespec64 __i_atime;
+ struct timespec64 __i_mtime;
struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
@@ -738,7 +738,7 @@ struct inode {
#endif
#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_info *i_crypt_info;
+ struct fscrypt_inode_info *i_crypt_info;
#endif
#ifdef CONFIG_FS_VERITY
@@ -1042,7 +1042,10 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1119,7 +1122,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_NOATIME BIT(10) /* Do not update access times. */
#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
#define SB_SILENT BIT(15)
-#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
#define SB_I_VERSION BIT(23) /* Update inode I_version field */
@@ -1166,6 +1169,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
/* Possible states of 'frozen' field */
enum {
@@ -1206,7 +1210,7 @@ struct super_block {
#ifdef CONFIG_SECURITY
void *s_security;
#endif
- const struct xattr_handler **s_xattr;
+ const struct xattr_handler * const *s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
@@ -1221,6 +1225,7 @@ struct super_block {
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
+ struct bdev_handle *s_bdev_handle;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
@@ -1511,24 +1516,81 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
-/**
- * inode_get_ctime - fetch the current ctime from the inode
- * @inode: inode from which to fetch ctime
- *
- * Grab the current ctime from the inode and return it.
- */
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_sec;
+}
+
+static inline long inode_get_atime_nsec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
+{
+ return inode->__i_atime;
+}
+
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_atime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_atime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_sec;
+}
+
+static inline long inode_get_mtime_nsec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
+{
+ return inode->__i_mtime;
+}
+
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_mtime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_sec;
+}
+
+static inline long inode_get_ctime_nsec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_nsec;
+}
+
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
return inode->__i_ctime;
}
-/**
- * inode_set_ctime_to_ts - set the ctime in the inode
- * @inode: inode in which to set the ctime
- * @ts: value to set in the ctime field
- *
- * Set the ctime in @inode to @ts
- */
static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
struct timespec64 ts)
{
@@ -1553,6 +1615,8 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
return inode_set_ctime_to_ts(inode, ts);
}
+struct timespec64 simple_inode_init_ts(struct inode *inode);
+
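With the fields renamed to __i_atime/__i_mtime, all access is expected to funnel through the accessors above. A sketch of the conversion pattern (both helpers here are hypothetical callers, not in-tree code):

static void example_touch_atime(struct inode *inode)
{
	/* was: inode->i_atime = current_time(inode); */
	inode_set_atime_to_ts(inode, current_time(inode));
}

static bool example_mtime_newer(const struct inode *a, const struct inode *b)
{
	struct timespec64 ta = inode_get_mtime(a);
	struct timespec64 tb = inode_get_mtime(b);

	return timespec64_compare(&ta, &tb) > 0;
}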
/*
* Snapshotting support.
*/
@@ -2081,7 +2145,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -2403,7 +2472,7 @@ struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- int refcnt;
+ atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
@@ -2448,24 +2517,24 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct file *backing_file_open(const struct path *path, int flags,
+struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred);
-struct path *backing_file_real_path(struct file *f);
+struct path *backing_file_user_path(struct file *f);
/*
- * file_real_path - get the path corresponding to f_inode
+ * file_user_path - get the path to display for memory mapped file
*
- * When opening a backing file for a stackable filesystem (e.g.,
- * overlayfs) f_path may be on the stackable filesystem and f_inode on
- * the underlying filesystem. When the path associated with f_inode is
- * needed, this helper should be used instead of accessing f_path
- * directly.
-*/
-static inline const struct path *file_real_path(struct file *f)
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path is displayed to the user (e.g. via
+ * /proc/<pid>/maps), this helper should be used to get the path to display
+ * to the user, which is the path of the fd that the user requested to map.
+ */
+static inline const struct path *file_user_path(struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
- return backing_file_real_path(f);
+ return backing_file_user_path(f);
return &f->f_path;
}
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 010d39d0dc1c..2b1f74b24070 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -16,14 +16,14 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index c895b12737a1..12f9e455d569 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -31,7 +31,7 @@
#define FSCRYPT_CONTENTS_ALIGNMENT 16
union fscrypt_policy;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fs_parameter;
struct seq_file;
@@ -59,26 +59,55 @@ struct fscrypt_name {
#ifdef CONFIG_FS_ENCRYPTION
-/*
- * If set, the fscrypt bounce page pool won't be allocated (unless another
- * filesystem needs it). Set this if the filesystem always uses its own bounce
- * pages for writes and therefore won't need the fscrypt bounce page pool.
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
/* Crypto operations for filesystems */
struct fscrypt_operations {
- /* Set of optional flags; see above for allowed flags */
- unsigned int flags;
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
/*
- * If set, this is a filesystem-specific key description prefix that
- * will be accepted for "logon" keys for v1 fscrypt policies, in
- * addition to the generic prefix "fscrypt:". This functionality is
- * deprecated, so new filesystems shouldn't set this field.
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
*/
- const char *key_prefix;
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+	 * (i.e. blocks that aren't all equal to the filesystem's block size), for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
/*
* Get the fscrypt context of the given inode.
@@ -146,21 +175,6 @@ struct fscrypt_operations {
bool (*has_stable_inodes)(struct super_block *sb);
/*
- * Get the number of bits that the filesystem uses to represent inode
- * numbers and file logical block numbers.
- *
- * By default, both of these are assumed to be 64-bit. This function
- * can be implemented to declare that either or both of these numbers is
- * shorter, which may allow the use of the
- * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
- * inline crypto hardware whose maximum DUN length is less than 64 bits
- * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
- * to be implemented if support for one of these features is needed.
- */
- void (*get_ino_and_lblk_bits)(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret);
-
- /*
* Return an array of pointers to the block devices to which the
* filesystem may write encrypted file contents, NULL if the filesystem
* only has a single such block device, or an ERR_PTR() on error.
@@ -178,7 +192,8 @@ struct fscrypt_operations {
unsigned int *num_devs);
};
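Under the reworked structure a filesystem declares capabilities with bitfields instead of FS_CFLG_* flags. A hedged sketch of an operations table; only the fields shown are taken from the structure above, everything else is elided:

static const struct fscrypt_operations example_cryptops = {
	.needs_bounce_pages	= 1,	/* uses fscrypt_encrypt_pagecache_blocks() */
	.has_32bit_inodes	= 1,	/* inode numbers fit in 32 bits */
	/* .get_context, .set_context, ... as before */
};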
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
/*
* Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
@@ -390,7 +405,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
return NULL;
}
@@ -868,7 +884,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
*/
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return fscrypt_get_info(inode) != NULL;
+ return fscrypt_get_inode_info(inode) != NULL;
}
/**
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index ed48e4f1e755..bcb6609b54b3 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -96,8 +96,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
if (file->f_mode & FMODE_NONOTIFY)
return 0;
- /* Overlayfs internal files have fake f_path */
- path = file_real_path(file);
+ path = &file->f_path;
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 107613f7d792..847413164738 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -38,6 +38,7 @@
#include <asm/page.h>
#include <linux/bug.h>
+#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/types.h>
@@ -116,6 +117,11 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_objs_per_page(_radix) \
+ (PAGE_SIZE / sizeof((_radix)->type[0]))
+#define __genradix_page_remainder(_radix) \
+ (PAGE_SIZE % sizeof((_radix)->type[0]))
+
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
@@ -179,11 +185,35 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
#define genradix_iter_peek(_iter, _radix) \
(__genradix_cast(_radix) \
__genradix_iter_peek(_iter, &(_radix)->tree, \
- PAGE_SIZE / __genradix_obj_size(_radix)))
+ __genradix_objs_per_page(_radix)))
+
+void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *,
+ size_t, size_t);
+
+/**
+ * genradix_iter_peek_prev - get first entry at or below iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or below @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek_prev(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix), \
+ __genradix_obj_size(_radix) + \
+ __genradix_page_remainder(_radix)))
static inline void __genradix_iter_advance(struct genradix_iter *iter,
size_t obj_size)
{
+ if (iter->offset + obj_size < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return;
+ }
+
iter->offset += obj_size;
if (!is_power_of_2(obj_size) &&
@@ -196,6 +226,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_iter_advance(_iter, _radix) \
__genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+static inline void __genradix_iter_rewind(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset == 0 ||
+ iter->offset == SIZE_MAX) {
+ iter->offset = SIZE_MAX;
+ return;
+ }
+
+ if ((iter->offset & (PAGE_SIZE - 1)) == 0)
+ iter->offset -= PAGE_SIZE % obj_size;
+
+ iter->offset -= obj_size;
+ iter->pos--;
+}
+
+#define genradix_iter_rewind(_iter, _radix) \
+ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix))
+
#define genradix_for_each_from(_radix, _iter, _p, _start) \
for (_iter = genradix_iter_init(_radix, _start); \
(_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
@@ -213,6 +262,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_for_each(_radix, _iter, _p) \
genradix_for_each_from(_radix, _iter, _p, 0)
+#define genradix_last_pos(_radix) \
+ (SIZE_MAX / PAGE_SIZE * __genradix_objs_per_page(_radix) - 1)
+
+/**
+ * genradix_for_each_reverse - iterate over entries in a genradix, in reverse order
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each_reverse(_radix, _iter, _p) \
+ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\
+ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\
+ genradix_iter_rewind(&_iter, _radix))
+
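A minimal sketch of reverse iteration with the new macro; the radix declaration and payload type are hypothetical:

static GENRADIX(u64) example_radix;

static void example_setup(void)
{
	genradix_init(&example_radix);
}

static void example_walk_reverse(void)
{
	struct genradix_iter iter;
	u64 *p;

	/* Starts from genradix_last_pos() and rewinds towards index 0. */
	genradix_for_each_reverse(&example_radix, iter, p)
		pr_debug("pos %zu: %llu\n", iter.pos,
			 (unsigned long long)*p);
}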
int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
/**
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a30686e649f7..47d25a5e1933 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
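The helpers are meant to bracket a range zap, with __hugetlb_zap_begin() free to widen the range for PMD sharing before the page tables are walked. A sketch of the calling pattern, assuming the caller can see the mm-internal struct zap_details:

static void example_zap_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	struct zap_details details = { };

	hugetlb_zap_begin(vma, &start, &end);	/* may widen [start, end) */
	/* ... per-VMA unmap work for the (possibly widened) range ... */
	hugetlb_zap_end(vma, &details);
}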
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+}
+
static inline struct page *hugetlb_follow_page_mask(
struct vm_area_struct *vma, unsigned long address, unsigned int flags,
unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
@@ -1233,6 +1265,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index bd2f6e19c357..b24fb80782c5 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -4356,6 +4356,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame
* @hdr: the frame
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..270454a6703d
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided, one for handling
+ * mapped kernel addresses and the other for user addresses, which have the
+ * potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process (i.e.
+ * 0 indicates complete processing).
+ *
+ * This function returns the amount of data processed (i.e. 0 means nothing was
+ * processed and the value of @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
+
+#endif /* _LINUX_IOV_ITER_H */
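A step function consumes one segment and returns the number of bytes it did not process, so a plain copy looks like the sketch below; copy_from_user() conveniently already returns the uncopied byte count. The names are illustrative:

/* Assumes <linux/uaccess.h> and <linux/string.h> on top of this header. */
static size_t example_step(void *iter_base, size_t progress, size_t len,
			   void *priv, void *priv2)
{
	memcpy(priv + progress, iter_base, len);
	return 0;	/* whole segment processed */
}

static size_t example_ustep(void __user *iter_base, size_t progress,
			    size_t len, void *priv, void *priv2)
{
	return copy_from_user(priv + progress, iter_base, len);
}

static size_t example_copy_from_iter(void *buf, size_t len,
				     struct iov_iter *iter)
{
	return iterate_and_advance(iter, len, buf,
				   example_ustep, example_step);
}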
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index f174ff1b59ee..8f972eaca2ed 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -256,7 +256,7 @@ inode_peek_iversion(const struct inode *inode)
* For filesystems without any sort of change attribute, the best we can
* do is fake one up from the ctime:
*/
-static inline u64 time_to_chattr(struct timespec64 *t)
+static inline u64 time_to_chattr(const struct timespec64 *t)
{
u64 chattr = t->tv_sec;
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 842623d708c2..71fa9a40fb5a 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -466,10 +466,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 85bda2d02d65..2c982ff7475a 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -74,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test if a node is on
+ * a list or not, this initialises the node to clearly
+ * not be on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ node->next = node;
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or
+ * some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return node->next != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -249,6 +276,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head)
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
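init_llist_node()/llist_on_list() let a node record whether it is queued. A sketch; note the off-list test before adding needs external serialization against other producers of the same node:

struct example_work {
	struct llist_node node;
};

static struct llist_head example_list;

static void example_work_init(struct example_work *w)
{
	init_llist_node(&w->node);	/* node now reports off-list */
}

static void example_queue_once(struct example_work *w)
{
	if (!llist_on_list(&w->node))
		llist_add(&w->node, &example_list);
}

static void example_pop(void)
{
	struct llist_node *n = llist_del_first_init(&example_list);

	/* llist_on_list(n) is false again here */
	if (n)
		kfree(container_of(n, struct example_work, node));
}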
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 0f016d69c996..9f565416d186 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-void nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns true, it is safe to go to sleep after
+ * a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the iterator variable
+ * is set to NULL, the iterator removes that entry from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the lwq_node embedded in the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
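A usage sketch for the queue as a whole: items are embedded, enqueued from any context, and drained one at a time from process context. Names and the wakeup hook are illustrative:

struct example_item {
	struct lwq_node node;
	int payload;
};

static struct lwq example_q;	/* lwq_init(&example_q) at setup */

static void example_produce(struct example_item *item)
{
	/* Safe from BH/IRQ context; true hints the queue was empty. */
	if (lwq_enqueue(&item->node, &example_q))
		; /* wake the consumer thread here */
}

static void example_consume(void)	/* process context only */
{
	struct example_item *item;

	while ((item = lwq_dequeue(&example_q, struct example_item, node)))
		pr_debug("payload %d\n", item->payload);
}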
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 4f40b40306d0..ac3dd2876197 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -92,8 +92,8 @@ extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
-extern int __mnt_want_write(struct vfsmount *);
-extern void __mnt_drop_write(struct vfsmount *);
+int mnt_get_write_access(struct vfsmount *mnt);
+void mnt_put_write_access(struct vfsmount *mnt);
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index a7376f9beddf..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -55,6 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 90a141ba2a5a..c29ace15a053 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -225,6 +225,7 @@ struct gpio_desc;
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -233,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 1463cbda4888..3100371b5e32 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -92,6 +92,30 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * In most filesystems, umask stripping depends on whether or not the
+ * filesystem supports POSIX ACLs. If the filesystem doesn't support it umask
+ * stripping is done directly in here. If the filesystem does support POSIX
+ * ACLs umask stripping is deferred until the filesystem calls
+ * posix_acl_create().
+ *
+ * Some filesystems (like NFSv4) also want to avoid umask stripping by the
+ * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK
+ * to get this effect without declaring that they support POSIX ACLs.
+ *
+ * Returns: mode
+ */
+static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK))
+ mode &= ~current_umask();
+ return mode;
+}
+
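A sketch of where the helper sits in a filesystem's create path; the surrounding function is hypothetical:

static int example_create(struct inode *dir, struct dentry *dentry,
			  umode_t mode)
{
	/* No-op for POSIX-ACL and SB_I_NOUMASK filesystems. */
	mode = mode_strip_umask(dir, mode);
	/* ... allocate the new inode with the stripped mode ... */
	return 0;
}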
extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
@@ -112,7 +136,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
static inline bool
retry_estale(const long error, const unsigned int flags)
{
- return error == -ESTALE && !(flags & LOOKUP_REVAL);
+ return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL));
}
#endif /* _LINUX_NAMEI_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 730003c4f4af..c11c4db34639 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,7 +150,7 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
- /* xattr support (RFC8726) */
+ /* xattr support (RFC8276) */
OP_GETXATTR = 72,
OP_SETXATTR = 73,
OP_LISTXATTRS = 74,
@@ -389,79 +389,203 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+/*
+ * Symbol names and values are from RFC 7531 Section 2.
+ * "XDR Description of NFSv4.0"
+ */
+enum {
+ FATTR4_SUPPORTED_ATTRS = 0,
+ FATTR4_TYPE = 1,
+ FATTR4_FH_EXPIRE_TYPE = 2,
+ FATTR4_CHANGE = 3,
+ FATTR4_SIZE = 4,
+ FATTR4_LINK_SUPPORT = 5,
+ FATTR4_SYMLINK_SUPPORT = 6,
+ FATTR4_NAMED_ATTR = 7,
+ FATTR4_FSID = 8,
+ FATTR4_UNIQUE_HANDLES = 9,
+ FATTR4_LEASE_TIME = 10,
+ FATTR4_RDATTR_ERROR = 11,
+ FATTR4_ACL = 12,
+ FATTR4_ACLSUPPORT = 13,
+ FATTR4_ARCHIVE = 14,
+ FATTR4_CANSETTIME = 15,
+ FATTR4_CASE_INSENSITIVE = 16,
+ FATTR4_CASE_PRESERVING = 17,
+ FATTR4_CHOWN_RESTRICTED = 18,
+ FATTR4_FILEHANDLE = 19,
+ FATTR4_FILEID = 20,
+ FATTR4_FILES_AVAIL = 21,
+ FATTR4_FILES_FREE = 22,
+ FATTR4_FILES_TOTAL = 23,
+ FATTR4_FS_LOCATIONS = 24,
+ FATTR4_HIDDEN = 25,
+ FATTR4_HOMOGENEOUS = 26,
+ FATTR4_MAXFILESIZE = 27,
+ FATTR4_MAXLINK = 28,
+ FATTR4_MAXNAME = 29,
+ FATTR4_MAXREAD = 30,
+ FATTR4_MAXWRITE = 31,
+ FATTR4_MIMETYPE = 32,
+ FATTR4_MODE = 33,
+ FATTR4_NO_TRUNC = 34,
+ FATTR4_NUMLINKS = 35,
+ FATTR4_OWNER = 36,
+ FATTR4_OWNER_GROUP = 37,
+ FATTR4_QUOTA_AVAIL_HARD = 38,
+ FATTR4_QUOTA_AVAIL_SOFT = 39,
+ FATTR4_QUOTA_USED = 40,
+ FATTR4_RAWDEV = 41,
+ FATTR4_SPACE_AVAIL = 42,
+ FATTR4_SPACE_FREE = 43,
+ FATTR4_SPACE_TOTAL = 44,
+ FATTR4_SPACE_USED = 45,
+ FATTR4_SYSTEM = 46,
+ FATTR4_TIME_ACCESS = 47,
+ FATTR4_TIME_ACCESS_SET = 48,
+ FATTR4_TIME_BACKUP = 49,
+ FATTR4_TIME_CREATE = 50,
+ FATTR4_TIME_DELTA = 51,
+ FATTR4_TIME_METADATA = 52,
+ FATTR4_TIME_MODIFY = 53,
+ FATTR4_TIME_MODIFY_SET = 54,
+ FATTR4_MOUNTED_ON_FILEID = 55,
+};
+
+/*
+ * Symbol names and values are from RFC 5662 Section 2.
+ * "XDR Description of NFSv4.1"
+ */
+enum {
+ FATTR4_DIR_NOTIF_DELAY = 56,
+ FATTR4_DIRENT_NOTIF_DELAY = 57,
+ FATTR4_DACL = 58,
+ FATTR4_SACL = 59,
+ FATTR4_CHANGE_POLICY = 60,
+ FATTR4_FS_STATUS = 61,
+ FATTR4_FS_LAYOUT_TYPES = 62,
+ FATTR4_LAYOUT_HINT = 63,
+ FATTR4_LAYOUT_TYPES = 64,
+ FATTR4_LAYOUT_BLKSIZE = 65,
+ FATTR4_LAYOUT_ALIGNMENT = 66,
+ FATTR4_FS_LOCATIONS_INFO = 67,
+ FATTR4_MDSTHRESHOLD = 68,
+ FATTR4_RETENTION_GET = 69,
+ FATTR4_RETENTION_SET = 70,
+ FATTR4_RETENTEVT_GET = 71,
+ FATTR4_RETENTEVT_SET = 72,
+ FATTR4_RETENTION_HOLD = 73,
+ FATTR4_MODE_SET_MASKED = 74,
+ FATTR4_SUPPATTR_EXCLCREAT = 75,
+ FATTR4_FS_CHARSET_CAP = 76,
+};
+
+/*
+ * Symbol names and values are from RFC 7863 Section 2.
+ * "XDR Description of NFSv4.2"
+ */
+enum {
+ FATTR4_CLONE_BLKSIZE = 77,
+ FATTR4_SPACE_FREED = 78,
+ FATTR4_CHANGE_ATTR_TYPE = 79,
+ FATTR4_SEC_LABEL = 80,
+};
+
+/*
+ * Symbol names and values are from RFC 8275 Section 5.
+ * "The mode_umask Attribute"
+ */
+enum {
+ FATTR4_MODE_UMASK = 81,
+};
+
+/*
+ * Symbol names and values are from RFC 8276 Section 8.6.
+ * "Numeric Values Assigned to Protocol Extensions"
+ */
+enum {
+ FATTR4_XATTR_SUPPORT = 82,
+};
+
+/*
+ * The following internal definitions enable processing the above
+ * attribute bits within 32-bit word boundaries.
+ */
/* Mandatory Attributes */
-#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
-#define FATTR4_WORD0_TYPE (1UL << 1)
-#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
-#define FATTR4_WORD0_CHANGE (1UL << 3)
-#define FATTR4_WORD0_SIZE (1UL << 4)
-#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
-#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
-#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
-#define FATTR4_WORD0_FSID (1UL << 8)
-#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
-#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
-#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS)
+#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE)
+#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE)
+#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE)
+#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT)
+#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT)
+#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR)
+#define FATTR4_WORD0_FSID BIT(FATTR4_FSID)
+#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES)
+#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME)
+#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR)
/* Mandatory in NFSv4.1 */
-#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64)
/* Recommended Attributes */
-#define FATTR4_WORD0_ACL (1UL << 12)
-#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
-#define FATTR4_WORD0_ARCHIVE (1UL << 14)
-#define FATTR4_WORD0_CANSETTIME (1UL << 15)
-#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
-#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
-#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
-#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
-#define FATTR4_WORD0_FILEID (1UL << 20)
-#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
-#define FATTR4_WORD0_FILES_FREE (1UL << 22)
-#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
-#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
-#define FATTR4_WORD0_HIDDEN (1UL << 25)
-#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
-#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
-#define FATTR4_WORD0_MAXLINK (1UL << 28)
-#define FATTR4_WORD0_MAXNAME (1UL << 29)
-#define FATTR4_WORD0_MAXREAD (1UL << 30)
-#define FATTR4_WORD0_MAXWRITE (1UL << 31)
-#define FATTR4_WORD1_MIMETYPE (1UL << 0)
-#define FATTR4_WORD1_MODE (1UL << 1)
-#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
-#define FATTR4_WORD1_NUMLINKS (1UL << 3)
-#define FATTR4_WORD1_OWNER (1UL << 4)
-#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
-#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
-#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
-#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
-#define FATTR4_WORD1_RAWDEV (1UL << 9)
-#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
-#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
-#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
-#define FATTR4_WORD1_SPACE_USED (1UL << 13)
-#define FATTR4_WORD1_SYSTEM (1UL << 14)
-#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
-#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
-#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
-#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
-#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
-#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
-#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
-#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
-#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
-#define FATTR4_WORD1_DACL (1UL << 26)
-#define FATTR4_WORD1_SACL (1UL << 27)
-#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
-#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
-#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
-#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
-#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
-#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
-#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
+#define FATTR4_WORD0_ACL BIT(FATTR4_ACL)
+#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT)
+#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE)
+#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME)
+#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE)
+#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING)
+#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED)
+#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE)
+#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID)
+#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL)
+#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE)
+#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL)
+#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS)
+#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN)
+#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS)
+#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE)
+#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK)
+#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME)
+#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD)
+#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE)
+
+#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32)
+#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32)
+#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32)
+#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32)
+#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32)
+#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32)
+#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32)
+#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32)
+#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32)
+#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32)
+#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32)
+#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32)
+#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32)
+#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32)
+#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32)
+#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32)
+#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32)
+#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32)
+#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32)
+#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32)
+#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32)
+#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32)
+#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32)
+#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32)
+#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32)
+
+#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64)
+#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64)
+#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64)
+#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
+#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
+#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
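The rewritten mask definitions above derive each WORDn bit from the attribute's protocol ordinal (the FATTR4_* constants, introduced elsewhere in this series) minus the word base of 0, 32, or 64. A minimal userspace sketch of the same word/bit arithmetic, using time_modify's ordinal 53 from RFC 7530, which lands on word 1 bit 21 exactly as FATTR4_WORD1_TIME_MODIFY does:

#include <stdio.h>

/* time_modify is attribute 53: word 1, bit 21, matching
 * FATTR4_WORD1_TIME_MODIFY == BIT(53 - 32) above. */
#define FATTR4_TIME_MODIFY 53

static void attr_to_word_mask(unsigned int attr, unsigned int *word,
                              unsigned int *mask)
{
        *word = attr / 32;          /* which bitmap4 word */
        *mask = 1u << (attr % 32);  /* bit within that word */
}

int main(void)
{
        unsigned int word, mask;

        attr_to_word_mask(FATTR4_TIME_MODIFY, &word, &mask);
        printf("attr %u -> word%u mask 0x%08x\n",
               (unsigned int)FATTR4_TIME_MODIFY, word, mask);
        return 0;
}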
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e85cd1c0eaf3..7b5406e3288d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -704,6 +704,7 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 608a9eb86bff..8ff23bf5a819 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -62,9 +62,6 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
-#ifdef CONFIG_WATCH_QUEUE
- bool note_loss;
-#endif
unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
@@ -72,6 +69,9 @@ struct pipe_inode_info {
unsigned int r_counter;
unsigned int w_counter;
bool poll_usage;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
@@ -125,6 +125,22 @@ struct pipe_buf_operations {
};
/**
+ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
+ * i.e. it was created with O_NOTIFICATION_PIPE
+ * @pipe: The pipe to check
+ *
+ * Return: true if pipe is a watch queue, false otherwise.
+ */
+static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
+{
+#ifdef CONFIG_WATCH_QUEUE
+ return pipe->watch_queue != NULL;
+#else
+ return false;
+#endif
+}
+
+/**
* pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
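The new helper folds the CONFIG_WATCH_QUEUE ifdef into one place, so callers that cannot service notification pipes can test unconditionally. A kernel-context sketch of such a caller; the function name and the -EXDEV choice are illustrative:

/* Hypothetical caller: refuse an operation on notification pipes. */
static long example_pipe_op(struct pipe_inode_info *pipe)
{
        if (pipe_has_watch_queue(pipe))
                return -EXDEV;  /* watch-queue pipes don't support this */
        /* ... normal pipe handling ... */
        return 0;
}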
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 80cb00db42a4..79594aeb160d 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -154,7 +154,9 @@ struct packet_stacked_data
struct pktcdvd_device
{
- struct block_device *bdev; /* dev attached */
+ struct bdev_handle *bdev_handle; /* dev attached */
+ /* handle acquired for bdev during pkt_open_dev() */
+ struct bdev_handle *open_bdev_handle;
dev_t pkt_dev; /* our dev */
struct packet_settings settings;
struct packet_stats stats;
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index eceda1d1407a..730f77381d55 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,7 +5,7 @@
struct pseudo_fs_context {
const struct super_operations *ops;
- const struct xattr_handler **xattr;
+ const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..d5951e99706a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -875,6 +875,7 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
+ struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 97bfef071255..27998f73183e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3679,6 +3679,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
return __skb_put_padto(skb, len, true);
}
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
+ __must_check;
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 9d1f5bb74dd5..58fb1f90eda5 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -24,8 +24,8 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
- char *buf, int len);
+int string_get_size(u64 size, u64 blk_size, enum string_size_units units,
+ char *buf, int len);
int parse_int_array_user(const char __user *from, size_t count, int **array);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index dbf5b21feafe..b10f987509cc 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,6 +17,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
@@ -33,10 +34,10 @@
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
- spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
- unsigned int sp_nrthreads; /* # of threads in pool */
+ struct lwq sp_xprts; /* pending transports */
+ atomic_t sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
+ struct llist_head sp_idle_threads; /* idle server threads */
/* statistics on pool operation */
struct percpu_counter sp_messages_arrived;
@@ -49,7 +50,8 @@ struct svc_pool {
/* bits for sp_flags */
enum {
SP_TASK_PENDING, /* still work to do even if no xprt is queued */
- SP_CONGESTED, /* all threads are busy, none idle */
+ SP_NEED_VICTIM, /* One thread needs to agree to exit */
+ SP_VICTIM_REMAINS, /* One thread needs to actually exit */
};
@@ -88,12 +90,9 @@ struct svc_serv {
int (*sv_threadfn)(void *data);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
- wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
- * entries in the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
@@ -186,6 +185,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -251,6 +251,7 @@ struct svc_rqst {
* net namespace
*/
void ** rq_lease_breaker; /* The v4 client breaking a lease */
+ unsigned int rq_status_counter; /* RPC processing counter */
};
/* bits for rq_flags */
@@ -261,8 +262,7 @@ enum {
RQ_DROPME, /* drop current reply */
RQ_SPLICE_OK, /* turned off in gss privacy to prevent
* encrypting page cache pages */
- RQ_VICTIM, /* about to be shut down */
- RQ_BUSY, /* request is busy */
+ RQ_VICTIM, /* Have agreed to shut down */
RQ_DATA, /* request has data */
};
@@ -301,6 +301,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
+/**
+ * svc_thread_should_stop - check if this thread should stop
+ * @rqstp: the thread that might need to stop
+ *
+ * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
+ * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
+ * the victim using this function. It should then promptly call
+ * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
+ * so the task waiting for a thread to exit can wake and continue.
+ *
+ * Return values:
+ * %true: caller should invoke svc_exit_thread()
+ * %false: caller should do nothing
+ */
+static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
+{
+ if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+
+ return test_bit(RQ_VICTIM, &rqstp->rq_flags);
+}
+
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
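Taken together with the SP_NEED_VICTIM/SP_VICTIM_REMAINS flags above, a service thread's main loop reduces to the shape below. This is a kernel-context sketch of the handshake, not the actual nfsd loop:

static int example_svc_thread(void *data)
{
        struct svc_rqst *rqstp = data;

        while (!svc_thread_should_stop(rqstp)) {
                /* wait for a transport and process one request ... */
        }
        /* Clears SP_VICTIM_REMAINS and wakes whoever shrank the pool. */
        svc_exit_thread(rqstp);
        return 0;
}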
@@ -413,8 +435,7 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_process(struct svc_rqst *rqstp);
-int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
- struct svc_rqst *);
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
const unsigned short, const unsigned short);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index fa55d12dc765..8e20cd60e2e7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -54,7 +54,7 @@ struct svc_xprt {
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
struct svc_serv *xpt_server; /* service for transport */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 4ecc89301eb7..f85d3a0daca2 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -57,6 +57,7 @@ struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -121,7 +122,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 493487ed7c38..f6dd6575b905 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -298,6 +298,7 @@ struct swap_info_struct {
unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
+ struct bdev_handle *bdev_handle;/* open handle of the bdev */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 42bce38a8e87..b6214cbf2a43 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -21,12 +21,12 @@ struct kvec {
enum iter_type {
/* iter types */
+ ITER_UBUF,
ITER_IOVEC,
- ITER_KVEC,
ITER_BVEC,
+ ITER_KVEC,
ITER_XARRAY,
ITER_DISCARD,
- ITER_UBUF,
};
#define ITER_SOURCE 1 // == WRITE
@@ -43,11 +43,7 @@ struct iov_iter {
bool copy_mc;
bool nofault;
bool data_source;
- bool user_backed;
- union {
- size_t iov_offset;
- int last_offset;
- };
+ size_t iov_offset;
/*
* Hack alert: overlay ubuf_iovec with iovec + count, so
* that the members resolve correctly regardless of the type
@@ -143,7 +139,7 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
static inline bool user_backed_iter(const struct iov_iter *i)
{
- return i->user_backed;
+ return iter_is_ubuf(i) || iter_is_iovec(i);
}
/*
@@ -342,27 +338,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
return npages;
}
-struct csum_state {
- __wsum csum;
- size_t off;
-};
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-
-static __always_inline __must_check
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
-{
- size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
- if (likely(copied == bytes))
- return true;
- iov_iter_revert(i, copied);
- return false;
-}
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i);
-
struct iovec *iovec_from_user(const struct iovec __user *uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat);
@@ -383,7 +358,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
*i = (struct iov_iter) {
.iter_type = ITER_UBUF,
.copy_mc = false,
- .user_backed = true,
.data_source = direction,
.ubuf = buf,
.count = count,
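With the cached user_backed flag gone, the property is derived from the iterator type itself; moving ITER_UBUF to the front of the enum keeps the two user-backed types adjacent. A kernel-context sketch of the invariant (the function is illustrative):

static void example_check(void __user *buf, size_t len)
{
        struct iov_iter iter;

        iov_iter_ubuf(&iter, ITER_SOURCE, buf, len);
        WARN_ON(!user_backed_iter(&iter)); /* also true for ITER_IOVEC */
}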
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 7b4dd69555e4..27cc1d464321 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,8 +3,8 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <linux/udp.h>
#include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
@@ -151,9 +151,22 @@ retry:
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* UFO may not include transport header in gso_size. */
- if (gso_type & SKB_GSO_UDP)
+ switch (gso_type & ~SKB_GSO_TCP_ECN) {
+ case SKB_GSO_UDP:
+ /* UFO may not include transport header in gso_size. */
nh_off -= thlen;
+ break;
+ case SKB_GSO_UDP_L4:
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+ if (skb->csum_offset != offsetof(struct udphdr, check))
+ return -EINVAL;
+ if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+ return -EINVAL;
+ if (gso_type != SKB_GSO_UDP_L4)
+ return -EINVAL;
+ break;
+ }
/* Kernel has a special handling for GSO_BY_FRAGS. */
if (gso_size == GSO_BY_FRAGS)
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
index 45cd42f55d49..429c7b6afead 100644
--- a/include/linux/watch_queue.h
+++ b/include/linux/watch_queue.h
@@ -32,7 +32,7 @@ struct watch_filter {
DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
};
u32 nr_filters; /* Number of filters */
- struct watch_type_filter filters[];
+ struct watch_type_filter filters[] __counted_by(nr_filters);
};
struct watch_queue {
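__counted_by() lets FORTIFY_SOURCE and UBSAN bounds-check the flexible array against nr_filters, which requires the counter to be assigned before the array is indexed. A hedged kernel-context allocation sketch; the function and its parameters are illustrative:

static struct watch_filter *
example_alloc_filter(const struct watch_type_filter *src, u32 count)
{
        struct watch_filter *wf;
        u32 i;

        wf = kzalloc(struct_size(wf, filters, count), GFP_KERNEL);
        if (!wf)
                return NULL;
        wf->nr_filters = count; /* must be set before filters[] is indexed */
        for (i = 0; i < count; i++)
                wf->filters[i] = src[i];
        return wf;
}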
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 2d5fcda1bcd0..082f89531b88 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -56,7 +56,7 @@ struct hci_mon_new_index {
__u8 type;
__u8 bus;
bdaddr_t bdaddr;
- char name[8];
+ char name[8] __nonstring;
} __packed;
#define HCI_MON_NEW_INDEX_SIZE 16
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index d466e1a3b0b1..fe1507c1db82 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -53,6 +53,7 @@ struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ bool (*gc)(const struct flow_offload *flow);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index bd7c3be4af5d..423b52eca908 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -50,6 +50,7 @@ struct netns_xfrm {
struct list_head policy_all;
struct hlist_head *policy_byidx;
unsigned int policy_idx_hmask;
+ unsigned int idx_generator;
struct hlist_head policy_inexact[XFRM_POLICY_MAX];
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
diff --git a/include/net/sock.h b/include/net/sock.h
index b770261fbdaf..92f7ea62a915 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -336,7 +336,7 @@ struct sk_filter;
* @sk_cgrp_data: cgroup data for this cgroup
* @sk_memcg: this socket's memory cgroup association
* @sk_write_pending: a write to stream socket waits to start
- * @sk_wait_pending: number of threads blocked on this socket
+ * @sk_disconnects: number of disconnect operations performed on this sock
* @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed
* @sk_write_space: callback to indicate there is bf sending space available
@@ -429,7 +429,7 @@ struct sock {
unsigned int sk_napi_id;
#endif
int sk_rcvbuf;
- int sk_wait_pending;
+ int sk_disconnects;
struct sk_filter __rcu *sk_filter;
union {
@@ -1189,8 +1189,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
}
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
- ({ int __rc; \
- __sk->sk_wait_pending++; \
+ ({ int __rc, __dis = __sk->sk_disconnects; \
release_sock(__sk); \
__rc = __condition; \
if (!__rc) { \
@@ -1200,8 +1199,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
} \
sched_annotate_sleep(); \
lock_sock(__sk); \
- __sk->sk_wait_pending--; \
- __rc = __condition; \
+ __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
__rc; \
})
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7b1a720691ae..4b03ca7cb8a5 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -141,6 +141,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RTO_MAX ((unsigned)(120*HZ))
#define TCP_RTO_MIN ((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
* used as a fallback RTO for the
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index fd41fdac0a8e..65e49fae8da7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -162,8 +162,24 @@ struct scsi_device {
* core. */
unsigned int eh_timeout; /* Error handling timeout */
- bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
- bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system suspend/resume (suspend to RAM and
+ * hibernation) operations.
+ */
+ bool manage_system_start_stop;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for runtime device suspend and resume operations.
+ */
+ bool manage_runtime_start_stop;
+
+ /*
+ * If true, let the high-level device driver (sd) manage the device
+ * power state for system shutdown (power off) operations.
+ */
+ bool manage_shutdown;
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d2faec9a323e..433543eb82b9 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -469,6 +469,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index b2db2c2f1c57..279a7a0c90c0 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -1561,7 +1561,6 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( const void *, wq )
__field( const void *, func )
__field( const void *, ordered_func )
- __field( const void *, ordered_free )
__field( const void *, normal_work )
),
@@ -1570,14 +1569,12 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->wq = work->wq;
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
- __entry->ordered_free = work->ordered_free;
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
- "ordered_free=%p",
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p",
__entry->work, __entry->normal_work, __entry->wq,
- __entry->func, __entry->ordered_func, __entry->ordered_free)
+ __entry->func, __entry->ordered_func)
);
/*
@@ -2497,6 +2494,82 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_write,
TP_ARGS(rbio, bio, trace_info)
);
+TRACE_EVENT(btrfs_insert_one_raid_extent,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ int num_stripes),
+
+ TP_ARGS(fs_info, logical, length, num_stripes),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( int, num_stripes )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->num_stripes = num_stripes;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d",
+ __entry->logical, __entry->length,
+ __entry->num_stripes)
+);
+
+TRACE_EVENT(btrfs_raid_extent_delete,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end,
+ u64 found_start, u64 found_end),
+
+ TP_ARGS(fs_info, start, end, found_start, found_end),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, start )
+ __field( u64, end )
+ __field( u64, found_start )
+ __field( u64, found_end )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->start = start;
+ __entry->end = end;
+ __entry->found_start = found_start;
+ __entry->found_end = found_end;
+ ),
+
+ TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu",
+ __entry->start, __entry->end, __entry->found_start,
+ __entry->found_end)
+);
+
+TRACE_EVENT(btrfs_get_raid_extent_offset,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
+ u64 physical, u64 devid),
+
+ TP_ARGS(fs_info, logical, length, physical, devid),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, logical )
+ __field( u64, length )
+ __field( u64, physical )
+ __field( u64, devid )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->logical = logical;
+ __entry->length = length;
+ __entry->physical = physical;
+ __entry->devid = devid;
+ ),
+
+ TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu",
+ __entry->logical, __entry->length, __entry->physical,
+ __entry->devid)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h
index 5eaa1fa99171..833143d0992e 100644
--- a/include/trace/events/neigh.h
+++ b/include/trace/events/neigh.h
@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
),
TP_fast_assign(
- struct in6_addr *pin6;
__be32 *p32;
__entry->family = tbl->family;
@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
__entry->entries = atomic_read(&tbl->gc_entries);
__entry->created = n != NULL;
__entry->gc_exempt = exempt_from_gc;
- pin6 = (struct in6_addr *)__entry->primary_key6;
p32 = (__be32 *)__entry->primary_key4;
if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
#if IS_ENABLED(CONFIG_IPV6)
if (tbl->family == AF_INET6) {
+ struct in6_addr *pin6;
+
pin6 = (struct in6_addr *)__entry->primary_key6;
*pin6 = *(struct in6_addr *)pkey;
}
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index f8069ef2ee0f..718df1d9b834 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1667,7 +1667,7 @@ TRACE_EVENT(svcrdma_encode_wseg,
__entry->offset = offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1703,7 +1703,7 @@ TRACE_EVENT(svcrdma_decode_rseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->position, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1740,7 +1740,7 @@ TRACE_EVENT(svcrdma_decode_wseg,
__entry->offset = segment->rs_offset;
),
- TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
+ TP_printk("cq.id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->segno, __entry->length,
(unsigned long long)__entry->offset, __entry->handle
@@ -1959,7 +1959,7 @@ TRACE_EVENT(svcrdma_send_pullup,
__entry->msglen = msglen;
),
- TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
+ TP_printk("cq.id=%u cid=%d hdr=%u msg=%u (total %u)",
__entry->cq_id, __entry->completion_id,
__entry->hdrlen, __entry->msglen,
__entry->hdrlen + __entry->msglen)
@@ -2014,7 +2014,7 @@ TRACE_EVENT(svcrdma_post_send,
wr->ex.invalidate_rkey : 0;
),
- TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
+ TP_printk("cq.id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
__entry->cq_id, __entry->completion_id,
__entry->num_sge, __entry->inv_rkey
)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 6beb38c1dcb5..337c90787fb1 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1677,7 +1677,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
svc_rqst_flag(DROPME) \
svc_rqst_flag(SPLICE_OK) \
svc_rqst_flag(VICTIM) \
- svc_rqst_flag(BUSY) \
svc_rqst_flag_end(DATA)
#undef svc_rqst_flag
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index eaf9f248619f..0bade1592f34 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -45,8 +45,8 @@ extern "C" {
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-/**
- * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
*
 * Query the maximum number of IBs that can be pushed through a single
* &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index dbb8b96da50d..7c29d82db9ee 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -333,6 +333,8 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
+#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
+#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@@ -753,6 +755,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
+#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index fc3c32186d7e..c25fc9614594 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -73,6 +73,9 @@
/* Holds the block group items for extent tree v2. */
#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
+/* Tracks RAID stripes in block groups. */
+#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
+
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@@ -231,6 +234,14 @@
#define BTRFS_SHARED_DATA_REF_KEY 184
/*
+ * Special inline ref key which stores the id of the subvolume which originally
+ * created the extent. This subvolume owns the extent permanently from the
+ * perspective of simple quotas. Needed to know which subvolume to free quota
+ * usage from when the extent is deleted.
+ */
+#define BTRFS_EXTENT_OWNER_REF_KEY 188
+
+/*
* block groups give us hints into the extent allocation trees. Which
* blocks are free etc etc
*/
@@ -261,6 +272,8 @@
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
+#define BTRFS_RAID_STRIPE_KEY 230
+
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@@ -719,6 +732,30 @@ struct btrfs_free_space_header {
__le64 num_bitmaps;
} __attribute__ ((__packed__));
+struct btrfs_raid_stride {
+ /* The id of device this raid extent lives on. */
+ __le64 devid;
+ /* The physical location on disk. */
+ __le64 physical;
+} __attribute__ ((__packed__));
+
+/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
+#define BTRFS_STRIPE_RAID0 1
+#define BTRFS_STRIPE_RAID1 2
+#define BTRFS_STRIPE_DUP 3
+#define BTRFS_STRIPE_RAID10 4
+#define BTRFS_STRIPE_RAID5 5
+#define BTRFS_STRIPE_RAID6 6
+#define BTRFS_STRIPE_RAID1C3 7
+#define BTRFS_STRIPE_RAID1C4 8
+
+struct btrfs_stripe_extent {
+ __u8 encoding;
+ __u8 reserved[7];
+ /* An array of raid strides this stripe is composed of. */
+ struct btrfs_raid_stride strides[];
+} __attribute__ ((__packed__));
+
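The stride count of a stripe extent is implicit: the tree item length minus the fixed header, divided by the stride size. A hedged kernel-context sketch of walking the on-disk strides; the dump function and its parameters are illustrative:

static void example_dump_strides(const struct btrfs_stripe_extent *se,
                                 size_t item_size)
{
        size_t n = (item_size - sizeof(*se)) /
                   sizeof(struct btrfs_raid_stride);
        size_t i;

        for (i = 0; i < n; i++) {
                u64 devid = le64_to_cpu(se->strides[i].devid);
                u64 physical = le64_to_cpu(se->strides[i].physical);

                pr_info("stride %zu: devid %llu physical %llu\n",
                        i, devid, physical);
        }
}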
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@@ -787,6 +824,10 @@ struct btrfs_shared_data_ref {
__le32 count;
} __attribute__ ((__packed__));
+struct btrfs_extent_owner_ref {
+ __le64 root_id;
+} __attribute__ ((__packed__));
+
struct btrfs_extent_inline_ref {
__u8 type;
__le64 offset;
@@ -1204,9 +1245,17 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+/*
+ * Whether or not this filesystem is using simple quotas. Not exactly the
+ * incompat bit, because we support using simple quotas, disabling it, then
+ * going back to full qgroup quotas.
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3)
+
#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
- BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \
+ BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
#define BTRFS_QGROUP_STATUS_VERSION 1
@@ -1228,6 +1277,15 @@ struct btrfs_qgroup_status_item {
* of the scan. It contains a logical address
*/
__le64 rescan;
+
+ /*
+	 * The generation when quotas were last enabled. Used by simple quotas to
+	 * avoid decrementing when freeing an extent that was written before
+	 * quotas were enabled.
+ *
+ * Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
+ */
+ __le64 enable_gen;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index fd1fb0d5389d..7a8f4c290187 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -71,7 +71,8 @@ struct fscrypt_policy_v2 {
__u8 contents_encryption_mode;
__u8 filenames_encryption_mode;
__u8 flags;
- __u8 __reserved[4];
+ __u8 log2_data_unit_size;
+ __u8 __reserved[3];
__u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
};
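The new log2_data_unit_size field selects a crypto data unit smaller than the filesystem block size; leaving it zero keeps the default. A hedged userspace sketch of setting a v2 policy with 4 KiB data units — the mode choices and the 2^12 value are illustrative, and older UAPI headers will not have the field yet:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

int set_policy_4k_units(int dirfd,
                        const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
        struct fscrypt_policy_v2 p;

        memset(&p, 0, sizeof(p));
        p.version = FSCRYPT_POLICY_V2;
        p.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
        p.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
        p.log2_data_unit_size = 12;     /* 2^12 = 4096-byte data units */
        memcpy(p.master_key_identifier, key_id,
               sizeof(p.master_key_identifier));
        return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &p);
}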
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index 2f61298a7b77..3dcdb9e33cba 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -33,6 +33,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
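This restores the standard netlink convention: __X_MAX is one past the last valid attribute, so the public maximum must be __X_MAX - 1; the old "+ 1" value allowed attribute ids beyond the policy array. The pattern, in miniature:

enum {
        EXAMPLE_A_UNSPEC,
        EXAMPLE_A_FOO,
        EXAMPLE_A_BAR,
        __EXAMPLE_A_MAX,
};
#define EXAMPLE_A_MAX (__EXAMPLE_A_MAX - 1)   /* == EXAMPLE_A_BAR */

/* Parse tables are then sized to hold every valid id:
 *   struct nlattr *tb[EXAMPLE_A_MAX + 1];
 */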
diff --git a/include/uapi/linux/nfsd_netlink.h b/include/uapi/linux/nfsd_netlink.h
new file mode 100644
index 000000000000..c8ae72466ee6
--- /dev/null
+++ b/include/uapi/linux/nfsd_netlink.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NFSD_H
+#define _UAPI_LINUX_NFSD_H
+
+#define NFSD_FAMILY_NAME "nfsd"
+#define NFSD_FAMILY_VERSION 1
+
+enum {
+ NFSD_A_RPC_STATUS_XID = 1,
+ NFSD_A_RPC_STATUS_FLAGS,
+ NFSD_A_RPC_STATUS_PROG,
+ NFSD_A_RPC_STATUS_VERSION,
+ NFSD_A_RPC_STATUS_PROC,
+ NFSD_A_RPC_STATUS_SERVICE_TIME,
+ NFSD_A_RPC_STATUS_PAD,
+ NFSD_A_RPC_STATUS_SADDR4,
+ NFSD_A_RPC_STATUS_DADDR4,
+ NFSD_A_RPC_STATUS_SADDR6,
+ NFSD_A_RPC_STATUS_DADDR6,
+ NFSD_A_RPC_STATUS_SPORT,
+ NFSD_A_RPC_STATUS_DPORT,
+ NFSD_A_RPC_STATUS_COMPOUND_OPS,
+
+ __NFSD_A_RPC_STATUS_MAX,
+ NFSD_A_RPC_STATUS_MAX = (__NFSD_A_RPC_STATUS_MAX - 1)
+};
+
+enum {
+ NFSD_CMD_RPC_STATUS_GET = 1,
+
+ __NFSD_CMD_MAX,
+ NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
+};
+
+#endif /* _UAPI_LINUX_NFSD_H */
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 77252cb46361..a722dcbf5073 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[];
+ struct mmp_overlay overlays[] __counted_by(overlay_num);
};
extern struct mmp_path *mmp_get_path(const char *name);
diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
index 8d2a3bfc8dac..47d96e75e8ef 100644
--- a/include/video/uvesafb.h
+++ b/include/video/uvesafb.h
@@ -109,8 +109,6 @@ struct uvesafb_ktask {
u32 ack;
};
-static int uvesafb_exec(struct uvesafb_ktask *tsk);
-
#define UVESAFB_EXACT_RES 1
#define UVESAFB_EXACT_DEPTH 2
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 5dfd30b13f48..5fdef94f0864 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -244,7 +244,7 @@ retry:
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1)
printk(" %s", p);
printk("\n");
- panic("VFS: Unable to mount root fs on %s", b);
+ panic("VFS: Unable to mount root fs on \"%s\" or %s", pretty_name, b);
out:
put_page(page);
}
diff --git a/init/init_task.c b/init/init_task.c
index ff6c4b9bfe6b..f703116e0523 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -85,6 +85,7 @@ struct task_struct init_task
.nr_cpus_allowed= NR_CPUS,
.mm = NULL,
.active_mm = &init_mm,
+ .faults_disabled_mapping = NULL,
.restart_block = {
.fn = do_no_restart_syscall,
},
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index c53678875416..f04a43044d91 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -53,7 +53,6 @@ static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
struct io_ring_ctx *ctx = f->private_data;
- struct io_sq_data *sq = NULL;
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
@@ -64,6 +63,7 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
unsigned int cq_shift = 0;
unsigned int sq_shift = 0;
unsigned int sq_entries, cq_entries;
+ int sq_pid = -1, sq_cpu = -1;
bool has_lock;
unsigned int i;
@@ -143,13 +143,19 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
- sq = ctx->sq_data;
- if (!sq->thread)
- sq = NULL;
+ struct io_sq_data *sq = ctx->sq_data;
+
+ if (mutex_trylock(&sq->lock)) {
+ if (sq->thread) {
+ sq_pid = task_pid_nr(sq->thread);
+ sq_cpu = task_cpu(sq->thread);
+ }
+ mutex_unlock(&sq->lock);
+ }
}
- seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
- seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
+ seq_printf(m, "SqThread:\t%d\n", sq_pid);
+ seq_printf(m, "SqThreadCpu:\t%d\n", sq_cpu);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(&ctx->file_table, i);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d839a80a6751..8d1bc6cdfe71 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2674,7 +2674,11 @@ static void io_pages_free(struct page ***pages, int npages)
if (!pages)
return;
+
page_array = *pages;
+ if (!page_array)
+ return;
+
for (i = 0; i < npages; i++)
unpin_user_page(page_array[i]);
kvfree(page_array);
@@ -2758,7 +2762,9 @@ static void io_rings_free(struct io_ring_ctx *ctx)
ctx->sq_sqes = NULL;
} else {
io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
+ ctx->n_ring_pages = 0;
io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
+ ctx->n_sqe_pages = 0;
}
}
diff --git a/io_uring/openclose.c b/io_uring/openclose.c
index e3fae26e025d..fb73adb89067 100644
--- a/io_uring/openclose.c
+++ b/io_uring/openclose.c
@@ -220,7 +220,6 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
struct files_struct *files = current->files;
struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
- struct fdtable *fdt;
struct file *file;
int ret = -EBADF;
@@ -230,13 +229,7 @@ int io_close(struct io_kiocb *req, unsigned int issue_flags)
}
spin_lock(&files->file_lock);
- fdt = files_fdtable(files);
- if (close->fd >= fdt->max_fds) {
- spin_unlock(&files->file_lock);
- goto err;
- }
- file = rcu_dereference_protected(fdt->fd[close->fd],
- lockdep_is_held(&files->file_lock));
+ file = files_lookup_fd_locked(files, close->fd);
if (!file || io_is_uring_fops(file)) {
spin_unlock(&files->file_lock);
goto err;
diff --git a/io_uring/rw.c b/io_uring/rw.c
index c8c822fa7980..8f68d5ad4564 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -339,7 +339,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned final_ret = io_fixup_rw_res(req, ret);
- if (req->flags & REQ_F_CUR_POS)
+ if (ret >= 0 && req->flags & REQ_F_CUR_POS)
req->file->f_pos = rw->kiocb.ki_pos;
if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
if (!__io_complete_rw_common(req, ret)) {
@@ -913,15 +913,6 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
kiocb_start_write(kiocb);
kiocb->ki_flags |= IOCB_WRITE;
- /*
- * For non-polled IO, set IOCB_DIO_CALLER_COMP, stating that our handler
- * groks deferring the completion to task context. This isn't
- * necessary and useful for polled IO as that can always complete
- * directly.
- */
- if (!(kiocb->ki_flags & IOCB_HIPRI))
- kiocb->ki_flags |= IOCB_DIO_CALLER_COMP;
-
if (likely(req->file->f_op->write_iter))
ret2 = call_write_iter(req->file, kiocb, &s->iter);
else if (req->file->f_op->write)
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ba8215ed663a..5eea4dc0509e 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -302,7 +302,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
if (S_ISREG(mode)) {
struct mqueue_inode_info *info;
@@ -596,7 +596,7 @@ static int mqueue_create_attr(struct dentry *dentry, umode_t mode, void *arg)
put_ipc_ns(ipc_ns);
dir->i_size += DIRENT_SIZE;
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
+ simple_inode_init_ts(dir);
d_instantiate(dentry, inode);
dget(dentry);
@@ -618,7 +618,7 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
- dir->i_mtime = dir->i_atime = inode_set_ctime_current(dir);
+ simple_inode_init_ts(dir);
dir->i_size -= DIRENT_SIZE;
drop_nlink(inode);
dput(dentry);
@@ -657,7 +657,7 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
if (ret <= 0)
return ret;
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
return ret;
}
@@ -1163,7 +1163,7 @@ static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
goto out_unlock;
__do_notify(info);
}
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
}
out_unlock:
spin_unlock(&info->lock);
@@ -1257,7 +1257,7 @@ static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
msg_ptr = msg_get(info);
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
/* There is now free space in queue. */
pipelined_receive(&wake_q, info);
@@ -1395,7 +1395,8 @@ retry:
if (notification == NULL) {
if (info->notify_owner == task_tgid(current)) {
remove_notification(info);
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
} else if (info->notify_owner != NULL) {
ret = -EBUSY;
@@ -1421,7 +1422,7 @@ retry:
info->notify_owner = get_pid(task_tgid(current));
info->notify_user_ns = get_user_ns(current_user_ns());
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
}
spin_unlock(&info->lock);
out_fput:
@@ -1484,7 +1485,7 @@ static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
f.file->f_flags &= ~O_NONBLOCK;
spin_unlock(&f.file->f_lock);
- inode->i_atime = inode_set_ctime_current(inode);
+ inode_set_atime_to_ts(inode, inode_set_ctime_current(inode));
}
spin_unlock(&info->lock);
diff --git a/kernel/acct.c b/kernel/acct.c
index 1a9f929fe629..986c8214dabf 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -246,7 +246,7 @@ static int acct_on(struct filename *pathname)
filp_close(file, NULL);
return PTR_ERR(internal);
}
- err = __mnt_want_write(internal);
+ err = mnt_get_write_access(internal);
if (err) {
mntput(internal);
kfree(acct);
@@ -271,7 +271,7 @@ static int acct_on(struct filename *pathname)
old = xchg(&ns->bacct, &acct->pin);
mutex_unlock(&acct->lock);
pin_kill(old);
- __mnt_drop_write(mnt);
+ mnt_put_write_access(mnt);
mntput(mnt);
return 0;
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 21d2fa815e78..6f0d6fb6523f 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2212,7 +2212,7 @@ __audit_reusename(const __user char *uptr)
if (!n->name)
continue;
if (n->name->uptr == uptr) {
- n->name->refcnt++;
+ atomic_inc(&n->name->refcnt);
return n->name;
}
}
@@ -2241,7 +2241,7 @@ void __audit_getname(struct filename *name)
n->name = name;
n->name_len = AUDIT_NAME_FULL;
name->aname = n;
- name->refcnt++;
+ atomic_inc(&name->refcnt);
}
static inline int audit_copy_fcaps(struct audit_names *name,
@@ -2373,7 +2373,7 @@ out_alloc:
return;
if (name) {
n->name = name;
- name->refcnt++;
+ atomic_inc(&name->refcnt);
}
out:
@@ -2500,7 +2500,7 @@ void __audit_inode_child(struct inode *parent,
if (found_parent) {
found_child->name = found_parent->name;
found_child->name_len = AUDIT_NAME_FULL;
- found_child->name->refcnt++;
+ atomic_inc(&found_child->name->refcnt);
}
}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 99d0625b6c82..1aafb2ff2e95 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -118,8 +118,7 @@ static struct inode *bpf_get_inode(struct super_block *sb,
return ERR_PTR(-ENOSPC);
inode->i_ino = get_next_ino();
- inode->i_atime = inode_set_ctime_current(inode);
- inode->i_mtime = inode->i_atime;
+ simple_inode_init_ts(inode);
inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
@@ -147,7 +146,7 @@ static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
d_instantiate(dentry, inode);
dget(dentry);
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
}
static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index c4ab9d6cdbe9..82ad23b1d257 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -308,11 +308,9 @@ again:
rcu_read_lock();
for (;; curr_fd++) {
struct file *f;
- f = task_lookup_next_fd_rcu(curr_task, &curr_fd);
+ f = task_lookup_next_fdget_rcu(curr_task, &curr_fd);
if (!f)
break;
- if (!get_file_rcu(f))
- continue;
/* set info->fd */
info->fd = curr_fd;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 01637677736f..dff067bd56b1 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -678,6 +678,11 @@ static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
size_t pool_size;
size_t tlb_size;
+ if (nslabs > SLABS_PER_PAGE << MAX_ORDER) {
+ nslabs = SLABS_PER_PAGE << MAX_ORDER;
+ nareas = limit_nareas(nareas, nslabs);
+ }
+
pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
pool = kzalloc(pool_size, gfp);
if (!pool)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c72a41f11af..a2f2a9525d72 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1954,6 +1954,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
+ group_leader->group_generation++;
perf_event__header_size(group_leader);
@@ -2144,6 +2145,7 @@ static void perf_group_detach(struct perf_event *event)
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
+ event->group_leader->group_generation++;
goto out;
}
@@ -5440,7 +5442,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
- struct perf_event *sub;
+ struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@@ -5450,6 +5452,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
+ /*
+ * Verify the grouping between the parent and child (inherited)
+ * events is still in tact.
+ *
+ * Specifically:
+ * - leader->ctx->lock pins leader->sibling_list
+ * - parent->child_mutex pins parent->child_list
+ * - parent->ctx->mutex pins parent->sibling_list
+ *
+ * Because parent->ctx != leader->ctx (and child_list nests inside
+ * ctx->mutex), group destruction is not atomic between children, also
+ * see perf_event_release_kernel(). Additionally, parent can grow the
+ * group.
+ *
+ * Therefore it is possible to have parent and child groups in a
+	 * different configuration and summing over such a beast makes no sense
+	 * whatsoever.
+ *
+ * Reject this.
+ */
+ parent = leader->parent;
+ if (parent &&
+ (parent->group_generation != leader->group_generation ||
+ parent->nr_siblings != leader->nr_siblings)) {
+ ret = -ECHILD;
+ goto unlock;
+ }
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@@ -5483,8 +5512,9 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] = atomic64_read(&sub->lost_samples);
}
+unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- return 0;
+ return ret;
}
static int perf_read_group(struct perf_event *event,
@@ -5503,10 +5533,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
- /*
- * By locking the child_mutex of the leader we effectively
- * lock the child list of all siblings.. XXX explain how.
- */
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@@ -13346,6 +13372,8 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
+ if (leader)
+ leader->group_generation = parent_event->group_generation;
return 0;
}
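For userspace, the visible effect is that a PERF_FORMAT_GROUP read on an inherited group can now fail with ECHILD instead of returning a bogus sum. A hedged userspace sketch of tolerating it; read_group is an illustrative helper:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/* Returns bytes read, 0 if the group layout changed underneath us,
 * or -1 on any other error. */
ssize_t read_group(int group_fd, uint64_t *vals, size_t n)
{
        ssize_t r = read(group_fd, vals, n * sizeof(*vals));

        if (r < 0 && errno == ECHILD)
                return 0;       /* mismatched inherited group; skip */
        return r;
}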
diff --git a/kernel/fork.c b/kernel/fork.c
index 3b6d20dfb9a8..640123767726 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1492,9 +1492,7 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
struct file *exe_file;
rcu_read_lock();
- exe_file = rcu_dereference(mm->exe_file);
- if (exe_file && !get_file_rcu(exe_file))
- exe_file = NULL;
+ exe_file = get_file_rcu(&mm->exe_file);
rcu_read_unlock();
return exe_file;
}
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
index 5353edfad8e1..b0639f21041f 100644
--- a/kernel/kcmp.c
+++ b/kernel/kcmp.c
@@ -64,8 +64,10 @@ get_file_raw_ptr(struct task_struct *task, unsigned int idx)
struct file *file;
rcu_read_lock();
- file = task_lookup_fd_rcu(task, idx);
+ file = task_lookup_fdget_rcu(task, idx);
rcu_read_unlock();
+ if (file)
+ fput(file);
return file;
}
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index d973fe6041bf..2deeeca3e71b 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -1126,6 +1126,9 @@ EXPORT_SYMBOL(ww_mutex_lock_interruptible);
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */
+EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin);
+EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end);
+
/**
* atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
* @cnt: the atomic which we are to dec
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8d35b9f9aaa3..dee341ae4ace 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -684,7 +684,7 @@ static void power_down(void)
cpu_relax();
}
-static int load_image_and_restore(bool snapshot_test)
+static int load_image_and_restore(void)
{
int error;
unsigned int flags;
@@ -694,12 +694,12 @@ static int load_image_and_restore(bool snapshot_test)
lock_device_hotplug();
error = create_basic_memory_bitmaps();
if (error) {
- swsusp_close(snapshot_test);
+ swsusp_close();
goto Unlock;
}
error = swsusp_read(&flags);
- swsusp_close(snapshot_test);
+ swsusp_close();
if (!error)
error = hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -788,7 +788,7 @@ int hibernate(void)
pm_pr_dbg("Checking hibernation image\n");
error = swsusp_check(false);
if (!error)
- error = load_image_and_restore(false);
+ error = load_image_and_restore();
}
thaw_processes();
@@ -952,7 +952,7 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
- swsusp_close(true);
+ swsusp_close();
goto Unlock;
}
@@ -973,7 +973,7 @@ static int software_resume(void)
goto Close_Finish;
}
- error = load_image_and_restore(true);
+ error = load_image_and_restore();
thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
@@ -987,7 +987,7 @@ static int software_resume(void)
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(true);
+ swsusp_close();
goto Finish;
}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index a98f95e309a3..17fd9aaaf084 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -172,7 +172,7 @@ int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
-void swsusp_close(bool exclusive);
+void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#endif
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 74edbce2320b..68a5c2f06957 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -222,7 +222,7 @@ int swsusp_swap_in_use(void)
*/
static unsigned short root_swap = 0xffff;
-static struct block_device *hib_resume_bdev;
+static struct bdev_handle *hib_resume_bdev_handle;
struct hib_bio_batch {
atomic_t count;
@@ -276,7 +276,8 @@ static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
struct bio *bio;
int error = 0;
- bio = bio_alloc(hib_resume_bdev, 1, opf, GFP_NOIO | __GFP_HIGH);
+ bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf,
+ GFP_NOIO | __GFP_HIGH);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
@@ -356,14 +357,14 @@ static int swsusp_swap_check(void)
return res;
root_swap = res;
- hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
+ hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
BLK_OPEN_WRITE, NULL, NULL);
- if (IS_ERR(hib_resume_bdev))
- return PTR_ERR(hib_resume_bdev);
+ if (IS_ERR(hib_resume_bdev_handle))
+ return PTR_ERR(hib_resume_bdev_handle);
- res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
if (res < 0)
- blkdev_put(hib_resume_bdev, NULL);
+ bdev_release(hib_resume_bdev_handle);
return res;
}
@@ -443,7 +444,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
err_rel:
release_swap_writer(handle);
err_close:
- swsusp_close(false);
+ swsusp_close();
return ret;
}
@@ -508,7 +509,7 @@ static int swap_writer_finish(struct swap_map_handle *handle,
if (error)
free_all_swap_pages(root_swap);
release_swap_writer(handle);
- swsusp_close(false);
+ swsusp_close();
return error;
}
@@ -1522,10 +1523,10 @@ int swsusp_check(bool exclusive)
void *holder = exclusive ? &swsusp_holder : NULL;
int error;
- hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, BLK_OPEN_READ,
- holder, NULL);
- if (!IS_ERR(hib_resume_bdev)) {
- set_blocksize(hib_resume_bdev, PAGE_SIZE);
+ hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
+ BLK_OPEN_READ, holder, NULL);
+ if (!IS_ERR(hib_resume_bdev_handle)) {
+ set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
clear_page(swsusp_header);
error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
swsusp_header, NULL);
@@ -1550,11 +1551,11 @@ int swsusp_check(bool exclusive)
put:
if (error)
- blkdev_put(hib_resume_bdev, holder);
+ bdev_release(hib_resume_bdev_handle);
else
pr_debug("Image signature found, resuming\n");
} else {
- error = PTR_ERR(hib_resume_bdev);
+ error = PTR_ERR(hib_resume_bdev_handle);
}
if (error)
@@ -1568,14 +1569,14 @@ put:
- * @exclusive: Close the resume device which is exclusively opened.
*/
-void swsusp_close(bool exclusive)
+void swsusp_close(void)
{
- if (IS_ERR(hib_resume_bdev)) {
+ if (IS_ERR(hib_resume_bdev_handle)) {
pr_debug("Image device not initialised\n");
return;
}
- blkdev_put(hib_resume_bdev, exclusive ? &swsusp_holder : NULL);
+ bdev_release(hib_resume_bdev_handle);
}
/**
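The conversion pattern used throughout this hunk: bdev_open_by_dev() returns a struct bdev_handle that carries both the block_device and its opener state, and bdev_release() replaces blkdev_put(). A hedged kernel-context sketch; dev and holder are illustrative:

static int example_open(dev_t dev, void *holder)
{
        struct bdev_handle *h;

        h = bdev_open_by_dev(dev, BLK_OPEN_READ, holder, NULL);
        if (IS_ERR(h))
                return PTR_ERR(h);
        /* ... submit I/O against h->bdev ... */
        bdev_release(h);        /* replaces blkdev_put(h->bdev, holder) */
        return 0;
}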
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 061a30a8925a..df348aa55d3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3657,7 +3657,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
*/
deadline = div_s64(deadline * old_weight, weight);
se->deadline = se->vruntime + deadline;
- min_deadline_cb_propagate(&se->run_node, NULL);
+ if (se != cfs_rq->curr)
+ min_deadline_cb_propagate(&se->run_node, NULL);
}
#ifdef CONFIG_SMP
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 9ed5ce989415..4f65824879ab 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -151,6 +151,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
put_task_stack(tsk);
return c.len;
}
+EXPORT_SYMBOL_GPL(stack_trace_save_tsk);
/**
* stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
@@ -301,6 +302,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *task,
save_stack_trace_tsk(task, &trace);
return trace.nr_entries;
}
+EXPORT_SYMBOL_GPL(stack_trace_save_tsk);
/**
* stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
index 3b21f4063258..881f90f0cbcf 100644
--- a/kernel/trace/fprobe.c
+++ b/kernel/trace/fprobe.c
@@ -189,7 +189,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
{
int i, size;
- if (num < 0)
+ if (num <= 0)
return -EINVAL;
if (!fp->exit_handler) {
@@ -202,8 +202,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
size = fp->nr_maxactive;
else
size = num * num_possible_cpus() * 2;
- if (size < 0)
- return -E2BIG;
+ if (size <= 0)
+ return -EINVAL;
fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
if (!fp->rethook)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3d7a180a8427..e834f149695b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -705,6 +705,41 @@ static struct notifier_block trace_kprobe_module_nb = {
.priority = 1 /* Invoked after kprobe module callback */
};
+static int count_symbols(void *data, unsigned long unused)
+{
+ unsigned int *count = data;
+
+ (*count)++;
+
+ return 0;
+}
+
+struct sym_count_ctx {
+ unsigned int count;
+ const char *name;
+};
+
+static int count_mod_symbols(void *data, const char *name, unsigned long unused)
+{
+ struct sym_count_ctx *ctx = data;
+
+ if (strcmp(name, ctx->name) == 0)
+ ctx->count++;
+
+ return 0;
+}
+
+static unsigned int number_of_same_symbols(char *func_name)
+{
+ struct sym_count_ctx ctx = { .count = 0, .name = func_name };
+
+ kallsyms_on_each_match_symbol(count_symbols, func_name, &ctx.count);
+
+ module_kallsyms_on_each_symbol(NULL, count_mod_symbols, &ctx);
+
+ return ctx.count;
+}
+
static int __trace_kprobe_create(int argc, const char *argv[])
{
/*
@@ -836,6 +871,31 @@ static int __trace_kprobe_create(int argc, const char *argv[])
}
}
+ if (symbol && !strchr(symbol, ':')) {
+ unsigned int count;
+
+ count = number_of_same_symbols(symbol);
+ if (count > 1) {
+ /*
+ * Users should use ADDR to remove the ambiguity of
+ * using KSYM only.
+ */
+ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
+ ret = -EADDRNOTAVAIL;
+
+ goto error;
+ } else if (count == 0) {
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ trace_probe_log_err(0, BAD_PROBE_ADDR);
+ ret = -ENOENT;
+
+ goto error;
+ }
+ }
+
trace_probe_log_set_index(0);
if (event) {
ret = traceprobe_parse_event_name(&event, &group, gbuf,
@@ -963,7 +1023,7 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
* @name: The name of the kprobe event
* @loc: The location of the kprobe event
* @kretprobe: Is this a return probe?
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
@@ -1036,7 +1096,7 @@ EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
/**
* __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
- * @args: Variable number of arg (pairs), one pair for each field
+ * @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_add_fields() wrapper, which
@@ -1695,6 +1755,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
}
#ifdef CONFIG_PERF_EVENTS
+
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
@@ -1705,6 +1766,24 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
int ret;
char *event;
+ if (func) {
+ unsigned int count;
+
+ count = number_of_same_symbols(func);
+ if (count > 1)
+ /*
+ * Users should use addr to remove the ambiguity of
+ * using func only.
+ */
+ return ERR_PTR(-EADDRNOTAVAIL);
+ else if (count == 0)
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ return ERR_PTR(-ENOENT);
+ }
+
/*
* local trace_kprobes are not added to dyn_event, so they are never
* searched in find_trace_kprobe(). Therefore, there is no concern of
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index db575094c498..d8b302d01083 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -404,7 +404,7 @@ static int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
vmstart = vma->vm_start;
}
if (file) {
- ret = trace_seq_path(s, &file->f_path);
+ ret = trace_seq_path(s, file_user_path(file));
if (ret)
trace_seq_printf(s, "[+0x%lx]",
ip - vmstart);
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 02b432ae7513..850d9ecb6765 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -450,6 +450,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(BAD_MAXACT, "Invalid maxactive number"), \
C(MAXACT_TOO_BIG, "Maxactive is too big"), \
C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
+ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
C(BAD_RETPROBE, "Retprobe address must be a function entry"), \
C(NO_TRACEPOINT, "Tracepoint is not found"), \
C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
diff --git a/lib/Kconfig b/lib/Kconfig
index c686f4adc124..2d90935d5a21 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -506,6 +506,9 @@ config ASSOCIATIVE_ARRAY
for more information.
+config CLOSURES
+ bool
+
config HAS_IOMEM
bool
depends on !NO_IOMEM
@@ -729,6 +732,11 @@ config PARMAN
config OBJAGG
tristate "objagg" if COMPILE_TEST
+config LWQ_TEST
+ bool "Boot-time test for lwq queuing"
+ help
+ Run boot-time test of light-weight queuing.
+
endmenu
config GENERIC_IOREMAP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index fa307f93fa2e..ce3a4abf40f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1720,6 +1720,15 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
+config DEBUG_CLOSURES
+ bool "Debug closures (bcache async widgits)"
+ depends on CLOSURES
+ select DEBUG_FS
+ help
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+
config DEBUG_MAPLE_TREE
bool "Debug maple trees"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 740109b6e2c8..1b311c7fd32b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,7 +45,7 @@ obj-y += lockref.o
obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
- bsearch.o find_bit.o llist.o memweight.o kfifo.o \
+ bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o
@@ -255,6 +255,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+obj-$(CONFIG_CLOSURES) += closure.o
+
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
diff --git a/drivers/md/bcache/closure.c b/lib/closure.c
index d8d9394a6beb..0855e698ced1 100644
--- a/drivers/md/bcache/closure.c
+++ b/lib/closure.c
@@ -6,13 +6,13 @@
* Copyright 2012 Google, Inc.
*/
+#include <linux/closure.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-#include "closure.h"
-
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
@@ -45,6 +45,7 @@ void closure_sub(struct closure *cl, int v)
{
closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
+EXPORT_SYMBOL(closure_sub);
/*
* closure_put - decrement a closure's refcount
@@ -53,6 +54,7 @@ void closure_put(struct closure *cl)
{
closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
+EXPORT_SYMBOL(closure_put);
/*
* closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -74,6 +76,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
+EXPORT_SYMBOL(__closure_wake_up);
/**
* closure_wait - add a closure to a waitlist
@@ -93,6 +96,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
return true;
}
+EXPORT_SYMBOL(closure_wait);
struct closure_syncer {
struct task_struct *task;
@@ -127,8 +131,9 @@ void __sched __closure_sync(struct closure *cl)
__set_current_state(TASK_RUNNING);
}
+EXPORT_SYMBOL(__closure_sync);
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);
@@ -144,6 +149,7 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
+EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -156,8 +162,7 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-
-static struct dentry *closure_debug;
+EXPORT_SYMBOL(closure_debug_destroy);
static int debug_show(struct seq_file *f, void *data)
{
@@ -181,7 +186,7 @@ static int debug_show(struct seq_file *f, void *data)
seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
- seq_printf(f, "\n");
+ seq_puts(f, "\n");
}
spin_unlock_irq(&closure_list_lock);
@@ -190,18 +195,11 @@ static int debug_show(struct seq_file *f, void *data)
DEFINE_SHOW_ATTRIBUTE(debug);
-void __init closure_debug_init(void)
+static int __init closure_debug_init(void)
{
- if (!IS_ERR_OR_NULL(bcache_debug))
- /*
- * it is unnecessary to check return value of
- * debugfs_create_file(), we should not care
- * about this.
- */
- closure_debug = debugfs_create_file(
- "closures", 0400, bcache_debug, NULL, &debug_fops);
+ debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
+ return 0;
}
-#endif
+late_initcall(closure_debug_init);
-MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
-MODULE_LICENSE("GPL");
+#endif
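With the closure code promoted from bcache to lib/ and its entry points exported, other users (bcachefs being the obvious consumer) can adopt the refcount-plus-wait idiom. A minimal sketch, assuming the <linux/closure.h> API this file implements (closure_init_stack(), closure_get()/closure_put(), closure_sync()); example_submit_io() is hypothetical:

#include <linux/closure.h>

static void example_io_done(void *cookie)	/* hypothetical completion */
{
	closure_put(cookie);	/* drop the ref taken at submit time */
}

static void example_wait_for_all_io(void)
{
	struct closure cl;

	closure_init_stack(&cl);	/* remaining starts at 1 (ours) */
	closure_get(&cl);		/* one ref per in-flight operation */
	/* example_submit_io(..., example_io_done, &cl); */
	closure_sync(&cl);		/* sleep until only our ref remains */
}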
diff --git a/lib/errname.c b/lib/errname.c
index 67739b174a8c..dd1b998552cd 100644
--- a/lib/errname.c
+++ b/lib/errname.c
@@ -228,3 +228,4 @@ const char *errname(int err)
return err > 0 ? name + 1 : name;
}
+EXPORT_SYMBOL(errname);
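Exporting errname() lets modular code translate error numbers into symbolic names directly (vsprintf's %pe already uses it internally). A one-function sketch; example_report() is illustrative:

#include <linux/errname.h>
#include <linux/printk.h>

static void example_report(int err)
{
	/* "-EINVAL" for negative err, "EINVAL" for positive, NULL if unknown */
	const char *name = errname(err);

	pr_warn("operation failed: %s (%d)\n", name ?: "unknown", err);
}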
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index f25eb111c051..41f1bcdc4488 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -1,4 +1,5 @@
+#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
@@ -166,6 +167,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter,
struct genradix_root *r;
struct genradix_node *n;
unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
restart:
r = READ_ONCE(radix->root);
if (!r)
@@ -184,10 +189,17 @@ restart:
(GENRADIX_ARY - 1);
while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ if (iter->offset + objs_per_ptr < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return NULL;
+ }
+
i++;
- iter->offset = round_down(iter->offset +
- genradix_depth_size(level),
- genradix_depth_size(level));
+ iter->offset = round_down(iter->offset + objs_per_ptr,
+ objs_per_ptr);
iter->pos = (iter->offset >> PAGE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
@@ -201,6 +213,64 @@ restart:
}
EXPORT_SYMBOL(__genradix_iter_peek);
+void *__genradix_iter_peek_prev(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page,
+ size_t obj_size_plus_page_remainder)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
+ iter->offset = genradix_depth_size(level);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+ }
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ iter->offset = round_down(iter->offset, objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ if (!iter->offset)
+ return NULL;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+
+ if (!i)
+ goto restart;
+ --i;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek_prev);
+
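A hedged usage sketch for the new reverse peek. It assumes the genradix_iter_peek_prev() wrapper macro in <linux/generic-radix-tree.h> that pairs with the export above, and that the tree was populated earlier with genradix_ptr_alloc(); struct item is illustrative:

#include <linux/generic-radix-tree.h>
#include <linux/types.h>

struct item {
	u64 id;
};

static GENRADIX(struct item) items;	/* genradix_init(&items) at setup */

static struct item *example_peek_at_or_before(size_t idx)
{
	struct genradix_iter iter = genradix_iter_init(&items, idx);

	/* Walks toward offset 0, skipping unallocated subtrees. */
	return genradix_iter_peek_prev(&iter, &items);
}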
static void genradix_free_recurse(struct genradix_node *n, unsigned level)
{
if (level) {
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 27234a820eeb..de7d11cf4c63 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
@@ -10,192 +9,71 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
-#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
+#include <linux/iov_iter.h>
-/* covers ubuf and kbuf alike */
-#define iterate_buf(i, n, base, len, off, __p, STEP) { \
- size_t __maybe_unused off = 0; \
- len = n; \
- base = __p + i->iov_offset; \
- len -= (STEP); \
- i->iov_offset += len; \
- n = len; \
-}
-
-/* covers iovec and kvec alike */
-#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
- size_t off = 0; \
- size_t skip = i->iov_offset; \
- do { \
- len = min(n, __p->iov_len - skip); \
- if (likely(len)) { \
- base = __p->iov_base + skip; \
- len -= (STEP); \
- off += len; \
- skip += len; \
- n -= len; \
- if (skip < __p->iov_len) \
- break; \
- } \
- __p++; \
- skip = 0; \
- } while (n); \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_bvec(i, n, base, len, off, p, STEP) { \
- size_t off = 0; \
- unsigned skip = i->iov_offset; \
- while (n) { \
- unsigned offset = p->bv_offset + skip; \
- unsigned left; \
- void *kaddr = kmap_local_page(p->bv_page + \
- offset / PAGE_SIZE); \
- base = kaddr + offset % PAGE_SIZE; \
- len = min(min(n, (size_t)(p->bv_len - skip)), \
- (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
- left = (STEP); \
- kunmap_local(kaddr); \
- len -= left; \
- off += len; \
- skip += len; \
- if (skip == p->bv_len) { \
- skip = 0; \
- p++; \
- } \
- n -= len; \
- if (left) \
- break; \
- } \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_xarray(i, n, base, len, __off, STEP) { \
- __label__ __out; \
- size_t __off = 0; \
- struct folio *folio; \
- loff_t start = i->xarray_start + i->iov_offset; \
- pgoff_t index = start / PAGE_SIZE; \
- XA_STATE(xas, i->xarray, index); \
- \
- len = PAGE_SIZE - offset_in_page(start); \
- rcu_read_lock(); \
- xas_for_each(&xas, folio, ULONG_MAX) { \
- unsigned left; \
- size_t offset; \
- if (xas_retry(&xas, folio)) \
- continue; \
- if (WARN_ON(xa_is_value(folio))) \
- break; \
- if (WARN_ON(folio_test_hugetlb(folio))) \
- break; \
- offset = offset_in_folio(folio, start + __off); \
- while (offset < folio_size(folio)) { \
- base = kmap_local_folio(folio, offset); \
- len = min(n, len); \
- left = (STEP); \
- kunmap_local(base); \
- len -= left; \
- __off += len; \
- n -= len; \
- if (left || n == 0) \
- goto __out; \
- offset += len; \
- len = PAGE_SIZE; \
- } \
- } \
-__out: \
- rcu_read_unlock(); \
- i->iov_offset += __off; \
- n = __off; \
-}
-
-#define __iterate_and_advance(i, n, base, len, off, I, K) { \
- if (unlikely(i->count < n)) \
- n = i->count; \
- if (likely(n)) { \
- if (likely(iter_is_ubuf(i))) { \
- void __user *base; \
- size_t len; \
- iterate_buf(i, n, base, len, off, \
- i->ubuf, (I)) \
- } else if (likely(iter_is_iovec(i))) { \
- const struct iovec *iov = iter_iov(i); \
- void __user *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- iov, (I)) \
- i->nr_segs -= iov - iter_iov(i); \
- i->__iov = iov; \
- } else if (iov_iter_is_bvec(i)) { \
- const struct bio_vec *bvec = i->bvec; \
- void *base; \
- size_t len; \
- iterate_bvec(i, n, base, len, off, \
- bvec, (K)) \
- i->nr_segs -= bvec - i->bvec; \
- i->bvec = bvec; \
- } else if (iov_iter_is_kvec(i)) { \
- const struct kvec *kvec = i->kvec; \
- void *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- kvec, (K)) \
- i->nr_segs -= kvec - i->kvec; \
- i->kvec = kvec; \
- } else if (iov_iter_is_xarray(i)) { \
- void *base; \
- size_t len; \
- iterate_xarray(i, n, base, len, off, \
- (K)) \
- } \
- i->count -= n; \
- } \
-}
-#define iterate_and_advance(i, n, base, len, off, I, K) \
- __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
-
-static int copyout(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
if (should_fail_usercopy())
- return n;
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ return len;
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = raw_copy_to_user(iter_to, from, len);
}
- return n;
+ return len;
}
-static int copyout_nofault(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- long res;
+ ssize_t res;
if (should_fail_usercopy())
- return n;
-
- res = copy_to_user_nofault(to, from, n);
+ return len;
- return res < 0 ? n : res;
+ from += progress;
+ res = copy_to_user_nofault(iter_to, from, len);
+ return res < 0 ? len : res;
}
-static int copyin(void *to, const void __user *from, size_t n)
+static __always_inline
+size_t copy_from_user_iter(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- size_t res = n;
+ size_t res = len;
if (should_fail_usercopy())
- return n;
- if (access_ok(from, n)) {
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
+ return len;
+ if (access_ok(iter_from, len)) {
+ to += progress;
+ instrument_copy_from_user_before(to, iter_from, len);
+ res = raw_copy_from_user(to, iter_from, len);
+ instrument_copy_from_user_after(to, iter_from, len, res);
}
return res;
}
+static __always_inline
+size_t memcpy_to_iter(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ memcpy(iter_to, from + progress, len);
+ return 0;
+}
+
+static __always_inline
+size_t memcpy_from_iter(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy(to + progress, iter_from, len);
+ return 0;
+}
+
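These helpers illustrate the calling convention that replaces the iterate_*() macros: a step function receives the mapped segment base, the bytes already handled ("progress"), the segment length, and the caller's priv pointer, and returns the number of bytes it could not process. A hedged sketch of a custom kernel-side step, written against the iterate_and_advance() inline from <linux/iov_iter.h> that this file now uses; the example_* names are illustrative:

#include <linux/iov_iter.h>
#include <linux/uio.h>

static __always_inline
size_t example_sum_step(void *iter_from, size_t progress,
			size_t len, void *priv, void *priv2)
{
	u64 *sum = priv;
	const u8 *p = iter_from;
	size_t i;

	for (i = 0; i < len; i++)
		*sum += p[i];
	return 0;	/* 0 bytes left unprocessed */
}

/* Sum the next @bytes of a kernel-backed (kvec/bvec/xarray) iterator. */
static u64 example_sum_iter(struct iov_iter *i, size_t bytes)
{
	u64 sum = 0;

	if (WARN_ON_ONCE(user_backed_iter(i)))
		return 0;	/* this sketch has no user-space step */
	iterate_and_advance(i, bytes, &sum, NULL, example_sum_step);
	return sum;
}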
/*
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
@@ -290,7 +168,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
.iter_type = ITER_IOVEC,
.copy_mc = false,
.nofault = false,
- .user_backed = true,
.data_source = direction,
.__iov = iov,
.nr_segs = nr_segs,
@@ -300,36 +177,35 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
- __wsum sum, size_t off)
-{
- __wsum next = csum_partial_copy_nocheck(from, to, len);
- return csum_block_add(sum, next, off);
-}
-
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyout(base, addr + off, len),
- memcpy(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
-static int copyout_mc(void __user *to, const void *from, size_t n)
-{
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = copy_mc_to_user((__force void *) to, from, n);
+static __always_inline
+size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = copy_mc_to_user(iter_to, from, len);
}
- return n;
+ return len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ return copy_mc_to_kernel(iter_to, from + progress, len);
}
/**
@@ -362,22 +238,35 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return 0;
if (user_backed_iter(i))
might_fault();
- __iterate_and_advance(i, bytes, base, len, off,
- copyout_mc(base, addr + off, len),
- copy_mc_to_kernel(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
-static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
- size_t size)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return copy_mc_to_kernel(to + progress, iter_from, len);
+}
+
+static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
+ return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+}
+
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (iov_iter_is_copy_mc(i))
- return (void *)copy_mc_to_kernel(to, from, size);
- return memcpy(to, from, size);
+ if (unlikely(iov_iter_is_copy_mc(i)))
+ return __copy_from_iter_mc(addr, bytes, i);
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter, memcpy_from_iter);
}
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
@@ -387,30 +276,44 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyin(addr + off, base, len),
- memcpy_from_iter(i, addr + off, base, len)
- )
-
- return bytes;
+ return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
+static __always_inline
+size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
+}
+
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_inatomic_nocache(addr + off, base, len),
- memcpy(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_nocache,
+ memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+static __always_inline
+size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_flushcache(to + progress, iter_from, len);
+}
+
+static __always_inline
+size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy_flushcache(to + progress, iter_from, len);
+ return 0;
+}
+
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
@@ -432,12 +335,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_flushcache(addr + off, base, len),
- memcpy_flushcache(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_flushcache,
+ memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
@@ -509,10 +409,9 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
- iterate_and_advance(i, n, base, len, off,
- copyout_nofault(base, kaddr + offset + off, len),
- memcpy(base, kaddr + offset + off, len)
- )
+ n = iterate_and_advance(i, n, kaddr + offset,
+ copy_to_user_iter_nofault,
+ memcpy_to_iter);
kunmap_local(kaddr);
res += n;
bytes -= n;
@@ -555,14 +454,25 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
-size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+static __always_inline
+size_t zero_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
{
- iterate_and_advance(i, bytes, base, len, count,
- clear_user(base, len),
- memset(base, 0, len)
- )
+ return clear_user(iter_to, len);
+}
+
+static __always_inline
+size_t zero_to_iter(void *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
+{
+ memset(iter_to, 0, len);
+ return 0;
+}
- return bytes;
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ return iterate_and_advance(i, bytes, NULL,
+ zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
@@ -587,10 +497,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
}
p = kmap_atomic(page) + offset;
- iterate_and_advance(i, n, base, len, off,
- copyin(p + off, base, len),
- memcpy_from_iter(i, p + off, base, len)
- )
+ n = __copy_from_iter(p, n, i);
kunmap_atomic(p);
copied += n;
offset += n;
@@ -1181,78 +1088,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- __wsum sum, next;
- sum = *csum;
- if (WARN_ON_ONCE(!i->data_source))
- return 0;
-
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_from_user(base, addr + off, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(addr + off, base, len, sum, off);
- })
- )
- *csum = sum;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
- struct iov_iter *i)
-{
- struct csum_state *csstate = _csstate;
- __wsum sum, next;
-
- if (WARN_ON_ONCE(i->data_source))
- return 0;
- if (unlikely(iov_iter_is_discard(i))) {
- // can't use csum_memcpy() for that one - data is not copied
- csstate->csum = csum_block_add(csstate->csum,
- csum_partial(addr, bytes, 0),
- csstate->off);
- csstate->off += bytes;
- return bytes;
- }
-
- sum = csum_shift(csstate->csum, csstate->off);
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_to_user(addr + off, base, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(base, addr + off, len, sum, off);
- })
- )
- csstate->csum = csum_shift(sum, csstate->off);
- csstate->off += bytes;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i)
-{
-#ifdef CONFIG_CRYPTO_HASH
- struct ahash_request *hash = hashp;
- struct scatterlist sg;
- size_t copied;
-
- copied = copy_to_iter(addr, bytes, i);
- sg_init_one(&sg, addr, copied);
- ahash_request_set_crypt(hash, &sg, NULL, copied);
- crypto_ahash_update(hash);
- return copied;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(hash_and_copy_to_iter);
-
static int iov_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
diff --git a/lib/llist.c b/lib/llist.c
index 6e668fa5a2c6..f21d0cfbbaaa 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -66,6 +66,34 @@ struct llist_node *llist_del_first(struct llist_head *head)
EXPORT_SYMBOL_GPL(llist_del_first);
/**
+ * llist_del_first_this - delete given entry of lock-less list if it is first
+ * @head: the head for your lock-less list
+ * @this: a list entry.
+ *
+ * If the head of the list is the given entry, delete it and return
+ * %true; otherwise return %false.
+ *
+ * Multiple callers can safely call this concurrently with multiple
+ * llist_add() callers, provided they all pass a different @this.
+ */
+bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this)
+{
+ struct llist_node *entry, *next;
+
+ /* acquire ensures ordering wrt try_cmpxchg() in llist_del_first() */
+ entry = smp_load_acquire(&head->first);
+ do {
+ if (entry != this)
+ return false;
+ next = READ_ONCE(entry->next);
+ } while (!try_cmpxchg(&head->first, &entry, next));
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(llist_del_first_this);
+
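The "different @this" rule above is easiest to satisfy when every caller owns its own node, which is how the lwq code added later in this series uses it. A small sketch; struct example_worker is hypothetical:

#include <linux/llist.h>

struct example_worker {
	struct llist_node node;	/* owned by exactly one worker */
};

static bool example_try_retire(struct llist_head *idle,
			       struct example_worker *w)
{
	/* Succeeds only if w's node is still the head of the idle stack. */
	return llist_del_first_this(idle, &w->node);
}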
+/**
* llist_reverse_order - reverse order of a llist chain
* @head: first item of the list to be reversed
*
diff --git a/lib/lwq.c b/lib/lwq.c
new file mode 100644
index 000000000000..57d080a4d53d
--- /dev/null
+++ b/lib/lwq.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Light-weight singly-linked queue.
+ *
+ * Entries are enqueued to the head of an llist, with no blocking.
+ * This can happen in any context.
+ *
+ * Entries are dequeued using a spinlock to protect against multiple
+ * access. The llist is staged into a "ready" list in FIFO order, and
+ * refreshed from the llist when the staged entries are exhausted.
+ *
+ * This is particularly suitable when work items are queued in BH or
+ * IRQ context, and where work items are handled one at a time by
+ * dedicated threads.
+ */
+#include <linux/rcupdate.h>
+#include <linux/lwq.h>
+
+struct llist_node *__lwq_dequeue(struct lwq *q)
+{
+ struct llist_node *this;
+
+ if (lwq_empty(q))
+ return NULL;
+ spin_lock(&q->lock);
+ this = q->ready;
+ if (!this && !llist_empty(&q->new)) {
+ /* ensure the queue doesn't transiently appear lwq_empty */
+ smp_store_release(&q->ready, (void *)1);
+ this = llist_reverse_order(llist_del_all(&q->new));
+ if (!this)
+ q->ready = NULL;
+ }
+ if (this)
+ q->ready = llist_next(this);
+ spin_unlock(&q->lock);
+ return this;
+}
+EXPORT_SYMBOL_GPL(__lwq_dequeue);
+
+/**
+ * lwq_dequeue_all - dequeue all currently enqueued objects
+ * @q: the queue to dequeue from
+ *
+ * Remove and return a linked list of llist_nodes of all the objects that were
+ * in the queue. The first on the list will be the object that was least
+ * recently enqueued.
+ */
+struct llist_node *lwq_dequeue_all(struct lwq *q)
+{
+ struct llist_node *r, *t, **ep;
+
+ if (lwq_empty(q))
+ return NULL;
+
+ spin_lock(&q->lock);
+ r = q->ready;
+ q->ready = NULL;
+ t = llist_del_all(&q->new);
+ spin_unlock(&q->lock);
+ ep = &r;
+ while (*ep)
+ ep = &(*ep)->next;
+ *ep = llist_reverse_order(t);
+ return r;
+}
+EXPORT_SYMBOL_GPL(lwq_dequeue_all);
+
+#if IS_ENABLED(CONFIG_LWQ_TEST)
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait_bit.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+struct tnode {
+ struct lwq_node n;
+ int i;
+ int c;
+};
+
+static int lwq_exercise(void *qv)
+{
+ struct lwq *q = qv;
+ int cnt;
+ struct tnode *t;
+
+ for (cnt = 0; cnt < 10000; cnt++) {
+ wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
+ t->c++;
+ if (lwq_enqueue(&t->n, q))
+ wake_up_var(q);
+ }
+ while (!kthread_should_stop())
+ schedule_timeout_idle(1);
+ return 0;
+}
+
+static int lwq_test(void)
+{
+ int i;
+ struct lwq q;
+ struct llist_node *l, **t1, *t2;
+ struct tnode *t;
+ struct task_struct *threads[8];
+
+ printk(KERN_INFO "testing lwq....\n");
+ lwq_init(&q);
+ printk(KERN_INFO " lwq: run some threads\n");
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
+ for (i = 0; i < 100; i++) {
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ break;
+ t->i = i;
+ t->c = 0;
+ if (lwq_enqueue(&t->n, &q))
+ wake_up_var(&q);
+ }
+ /* wait for threads to exit */
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ if (!IS_ERR_OR_NULL(threads[i]))
+ kthread_stop(threads[i]);
+ printk(KERN_INFO " lwq: dequeue first 50:");
+ for (i = 0; i < 50 ; i++) {
+ if (i && (i % 10) == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_INFO " lwq: ... ");
+ }
+ t = lwq_dequeue(&q, struct tnode, n);
+ if (t)
+ printk(KERN_CONT " %d(%d)", t->i, t->c);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ l = lwq_dequeue_all(&q);
+ printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n");
+ lwq_for_each_safe(t, t1, t2, &l, n) {
+ if ((t->i % 3) == 0) {
+ t->i = -1;
+ kfree(t);
+ t = NULL;
+ }
+ }
+ if (l)
+ lwq_enqueue_batch(l, &q);
+ printk(KERN_INFO " lwq: dequeue remaining:");
+ while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
+ printk(KERN_CONT " %d", t->i);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ return 0;
+}
+
+module_init(lwq_test);
+#endif /* CONFIG_LWQ_TEST */
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 0e00a84e8e8f..bb24d84a4922 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
/* Internal nodes */
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
/* Add working room for split (2 nodes) + new parents */
- mas_node_count(mas, nr_nodes + 3);
+ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
/* Detect if allocations run out */
mas->mas_flags |= MA_STATE_PREALLOC;
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 9982344cca34..7713f73e66b0 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -31,9 +31,11 @@
* giving the size in the required units. @buf should have room for
* at least 9 bytes and will always be zero terminated.
*
+ * Return value: number of characters of output that would have been written
+ * (which may be greater than len, if output was truncated).
*/
-void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
- char *buf, int len)
+int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len)
{
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
@@ -126,8 +128,8 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
else
unit = units_str[units][i];
- snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s %s", (u32)size,
+ tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
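With the snprintf()-style return value, callers can detect truncation for the first time. A small sketch; example_print_size() is illustrative:

#include <linux/printk.h>
#include <linux/string_helpers.h>

static void example_print_size(u64 bytes)
{
	char buf[10];	/* "at least 9 bytes" per the kerneldoc above */

	if (string_get_size(bytes, 1, STRING_UNITS_2, buf,
			    sizeof(buf)) >= sizeof(buf))
		pr_warn("size string truncated\n");
	else
		pr_info("size: %s\n", buf);
}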
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 06959165e2f9..464eeb90d5ad 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -9,6 +9,7 @@
#include <linux/maple_tree.h>
#include <linux/module.h>
+#include <linux/rwsem.h>
#define MTREE_ALLOC_MAX 0x2000000000000Ul
#define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
mas.index = 0;
mas.last = 0;
if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas.index = 0;
mas.last = 0;
rcu_read_lock();
- mas_lock(&newmas);
+ down_write(&newmt_lock);
if (mas_expected_entries(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas_store(&newmas, val);
}
mas_destroy(&newmas);
- mas_unlock(&newmas);
rcu_read_unlock();
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
}
#endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
void *tmp;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
if (!zero_start)
i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
mtree_store_range(mt, i*10, (i+1)*10 - gap,
xa_mk_value(i), GFP_KERNEL);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_non_kernel(99999);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
ret = mas_expected_entries(&newmas, nr_entries);
mt_set_non_kernel(0);
MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
/* Duplicate many sizes of trees. Mainly to test expected entry values */
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index b86ba7b0a921..f60e56150feb 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
return 0;
}
+static bool damon_sysfs_schemes_regions_updating;
+
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
cmd = damon_sysfs_cmd_request.cmd;
if (kdamond && ctx == kdamond->damon_ctx &&
(cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
- cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
+ cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
+ damon_sysfs_schemes_regions_updating) {
damon_sysfs_schemes_update_regions_stop(ctx);
+ damon_sysfs_schemes_regions_updating = false;
mutex_unlock(&damon_sysfs_lock);
}
@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
struct damon_sysfs_kdamond *kdamond;
- static bool damon_sysfs_schemes_regions_updating;
bool total_bytes_only = false;
int err = 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 52d26072dfda..1301ba7b2c9a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_read(&resv_map->rw_sema);
}
}
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_read(&resv_map->rw_sema);
}
}
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_write(&resv_map->rw_sema);
}
}
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_write(&resv_map->rw_sema);
}
}
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- if (!__vma_shareable_lock(vma))
- return 1;
+ if (__vma_shareable_lock(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- return down_write_trylock(&vma_lock->rw_sema);
+ return down_write_trylock(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ return down_write_trylock(&resv_map->rw_sema);
+ }
+
+ return 1;
}
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
lockdep_assert_held(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ lockdep_assert_held(&resv_map->rw_sema);
}
}
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
__hugetlb_vma_unlock_write_put(vma_lock);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ /* no free for anon vmas, but still need to unlock */
+ up_write(&resv_map->rw_sema);
}
}
@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
+ init_rwsem(&resv_map->rw_sema);
resv_map->adds_in_progress = 0;
/*
@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
- set_vma_private_data(vma, (get_vma_private_data(vma) &
- HPAGE_RESV_MASK) | (unsigned long)map);
+ set_vma_private_data(vma, (unsigned long)map);
}
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -5274,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
return len + old_addr - old_end;
}
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page, zap_flags_t zap_flags)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -5405,16 +5437,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
tlb_flush_mmu_tlbonly(tlb);
}
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page,
- zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
+ if (!vma->vm_file) /* hugetlbfs_file_mmap error */
+ return;
+
+ adjust_range_if_pmd_sharing_possible(vma, start, end);
hugetlb_vma_lock_write(vma);
- i_mmap_lock_write(vma->vm_file->f_mapping);
+ if (vma->vm_file)
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+}
- /* mmu notification performed in caller */
- __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+ if (!vma->vm_file) /* hugetlbfs_file_mmap error */
+ return;
if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
/*
@@ -5427,11 +5468,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
* someone else.
*/
__hugetlb_vma_unlock_write_free(vma);
- i_mmap_unlock_write(vma->vm_file->f_mapping);
} else {
- i_mmap_unlock_write(vma->vm_file->f_mapping);
hugetlb_vma_unlock_write(vma);
}
+
+ if (vma->vm_file)
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6811,8 +6853,10 @@ out_err:
*/
if (chg >= 0 && add < 0)
region_abort(resv_map, from, to, regions_needed);
- if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
kref_put(&resv_map->refs, resv_map_release);
+ set_vma_resv_map(vma, NULL);
+ }
return false;
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca4b6ff080a6..6e3cb118d20e 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -621,7 +621,7 @@ void kasan_report_async(void)
}
#endif /* CONFIG_KASAN_HW_TAGS */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
* With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
* canonical half of the address space) cause out-of-bounds shadow memory reads
diff --git a/mm/memory.c b/mm/memory.c
index 6c264d2f969c..517221f01303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
- __unmap_hugepage_range_final(tlb, vma, start, end,
+ __unmap_hugepage_range(tlb, vma, start, end,
NULL, zap_flags);
}
} else
@@ -1728,8 +1728,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
do {
- unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+ unsigned long start = start_addr;
+ unsigned long end = end_addr;
+ hugetlb_zap_begin(vma, &start, &end);
+ unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
+ hugetlb_zap_end(vma, &details);
} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
}
@@ -1753,9 +1757,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
- if (is_vm_hugetlb_page(vma))
- adjust_range_if_pmd_sharing_possible(vma, &range.start,
- &range.end);
+ hugetlb_zap_begin(vma, &range.start, &range.end);
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1768,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unmap_single_vma(&tlb, vma, address, end, details, false);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
+ hugetlb_zap_end(vma, details);
}
/**
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f1b00d6ac7ee..29ebf1e7898c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
* the home node for vmas we already updated before.
*/
old = vma_policy(vma);
- if (!old)
+ if (!old) {
+ prev = vma;
continue;
+ }
if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
err = -EOPNOTSUPP;
break;
diff --git a/mm/migrate.c b/mm/migrate.c
index 2053b54556ca..06086dc9da28 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2162,6 +2162,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
const int __user *nodes,
int __user *status, int flags)
{
+ compat_uptr_t __user *compat_pages = (void __user *)pages;
int current_node = NUMA_NO_NODE;
LIST_HEAD(pagelist);
int start, i;
@@ -2174,8 +2175,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
int node;
err = -EFAULT;
- if (get_user(p, pages + i))
- goto out_flush;
+ if (in_compat_syscall()) {
+ compat_uptr_t cp;
+
+ if (get_user(cp, compat_pages + i))
+ goto out_flush;
+
+ p = compat_ptr(cp);
+ } else {
+ if (get_user(p, pages + i))
+ goto out_flush;
+ }
if (get_user(node, nodes + i))
goto out_flush;
diff --git a/mm/mmap.c b/mm/mmap.c
index b56a7f0c9f85..9e018d8dd7d6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -583,11 +583,12 @@ again:
* dup_anon_vma() - Helper function to duplicate anon_vma
* @dst: The destination VMA
* @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
*
* Returns: 0 on success.
*/
static inline int dup_anon_vma(struct vm_area_struct *dst,
- struct vm_area_struct *src)
+ struct vm_area_struct *src, struct vm_area_struct **dup)
{
/*
* Easily overlooked: when mprotect shifts the boundary, make sure the
@@ -595,9 +596,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
* anon pages imported.
*/
if (src->anon_vma && !dst->anon_vma) {
+ int ret;
+
vma_assert_write_locked(dst);
dst->anon_vma = src->anon_vma;
- return anon_vma_clone(dst, src);
+ ret = anon_vma_clone(dst, src);
+ if (ret)
+ return ret;
+
+ *dup = dst;
}
return 0;
@@ -624,6 +631,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
struct vm_area_struct *next)
{
+ struct vm_area_struct *anon_dup = NULL;
bool remove_next = false;
struct vma_prepare vp;
@@ -633,7 +641,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
remove_next = true;
vma_start_write(next);
- ret = dup_anon_vma(vma, next);
+ ret = dup_anon_vma(vma, next, &anon_dup);
if (ret)
return ret;
}
@@ -661,6 +669,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
return 0;
nomem:
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
return -ENOMEM;
}
@@ -860,6 +870,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
{
struct vm_area_struct *curr, *next, *res;
struct vm_area_struct *vma, *adjust, *remove, *remove2;
+ struct vm_area_struct *anon_dup = NULL;
struct vma_prepare vp;
pgoff_t vma_pgoff;
int err = 0;
@@ -927,18 +938,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_start_write(next);
remove = next; /* case 1 */
vma_end = next->vm_end;
- err = dup_anon_vma(prev, next);
+ err = dup_anon_vma(prev, next, &anon_dup);
if (curr) { /* case 6 */
vma_start_write(curr);
remove = curr;
remove2 = next;
if (!next->anon_vma)
- err = dup_anon_vma(prev, curr);
+ err = dup_anon_vma(prev, curr, &anon_dup);
}
} else if (merge_prev) { /* case 2 */
if (curr) {
vma_start_write(curr);
- err = dup_anon_vma(prev, curr);
+ err = dup_anon_vma(prev, curr, &anon_dup);
if (end == curr->vm_end) { /* case 7 */
remove = curr;
} else { /* case 5 */
@@ -954,7 +965,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_end = addr;
adjust = next;
adj_start = -(prev->vm_end - addr);
- err = dup_anon_vma(next, prev);
+ err = dup_anon_vma(next, prev, &anon_dup);
} else {
/*
* Note that cases 3 and 8 are the ONLY ones where prev
@@ -968,14 +979,14 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_pgoff = curr->vm_pgoff;
vma_start_write(curr);
remove = curr;
- err = dup_anon_vma(next, curr);
+ err = dup_anon_vma(next, curr, &anon_dup);
}
}
}
/* Error in anon_vma clone. */
if (err)
- return NULL;
+ goto anon_vma_fail;
if (vma_start < vma->vm_start || vma_end > vma->vm_end)
vma_expanded = true;
@@ -988,7 +999,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
}
if (vma_iter_prealloc(vmi, vma))
- return NULL;
+ goto prealloc_fail;
init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
@@ -1016,6 +1027,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_complete(&vp, vmi, mm);
khugepaged_enter_vma(res, vm_flags);
return res;
+
+prealloc_fail:
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+
+anon_vma_fail:
+ vma_iter_set(vmi, addr);
+ vma_iter_load(vmi);
+ return NULL;
}
/*
@@ -3143,13 +3163,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (!len)
return 0;
- if (mmap_write_lock_killable(mm))
- return -EINTR;
-
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
ret = check_brk_limits(addr, len);
if (ret)
goto limits_failed;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95546f376302..85741403948f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6475,6 +6475,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
next_page = page;
current_buddy = page + size;
}
+ page = next_page;
if (set_page_guard(zone, current_buddy, high, migratetype))
continue;
@@ -6482,7 +6483,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
if (current_buddy != target) {
add_to_free_list(current_buddy, zone, high, migratetype);
set_buddy_order(current_buddy, high);
- page = next_page;
}
}
}
diff --git a/mm/readahead.c b/mm/readahead.c
index e815c114de21..6925e6959fd3 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -735,7 +735,8 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
*/
ret = -EINVAL;
if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
- !S_ISREG(file_inode(f.file)->i_mode))
+ (!S_ISREG(file_inode(f.file)->i_mode) &&
+ !S_ISBLK(file_inode(f.file)->i_mode)))
goto out;
ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
diff --git a/mm/shmem.c b/mm/shmem.c
index 69595d341882..6b102965d355 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1112,7 +1112,7 @@ whole_folios:
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -1224,7 +1224,7 @@ static int shmem_setattr(struct mnt_idmap *idmap,
if (!error && update_ctime) {
inode_set_ctime_current(inode);
if (update_mtime)
- inode->i_mtime = inode_get_ctime(inode);
+ inode_set_mtime_to_ts(inode, inode_get_ctime(inode));
inode_inc_iversion(inode);
}
return error;
@@ -2455,7 +2455,7 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
inode->i_ino = ino;
inode_init_owner(idmap, inode, dir, mode);
inode->i_blocks = 0;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_generation = get_random_u32();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
@@ -2463,7 +2463,7 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
atomic_set(&info->stop_eviction, 0);
info->seals = F_SEAL_SEAL;
info->flags = flags & VM_NORESERVE;
- info->i_crtime = inode->i_mtime;
+ info->i_crtime = inode_get_mtime(inode);
info->fsflags = (dir == NULL) ? 0 :
SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
if (info->fsflags)
@@ -3229,7 +3229,7 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
goto out_iput;
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
@@ -3318,8 +3318,8 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
}
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inode_inc_iversion(dir);
inc_nlink(inode);
ihold(inode); /* New dentry reference */
@@ -3339,8 +3339,8 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
dir->i_size -= BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_to_ts(dir,
- inode_set_ctime_current(inode));
+ inode_set_mtime_to_ts(dir,
+ inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
inode_inc_iversion(dir);
drop_nlink(inode);
dput(dentry); /* Undo the count from "create" - this does all the work */
@@ -3488,7 +3488,7 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
folio_put(folio);
}
dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_mtime = inode_set_ctime_current(dir);
+ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry);
@@ -3714,7 +3714,7 @@ static const struct xattr_handler shmem_user_xattr_handler = {
.set = shmem_xattr_handler_set,
};
-static const struct xattr_handler *shmem_xattr_handlers[] = {
+static const struct xattr_handler * const shmem_xattr_handlers[] = {
&shmem_security_xattr_handler,
&shmem_trusted_xattr_handler,
&shmem_user_xattr_handler,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8fda308e400d..9bbffe82d65a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -895,10 +895,13 @@ void __init setup_kmalloc_cache_index_table(void)
static unsigned int __kmalloc_minalign(void)
{
+ unsigned int minalign = dma_get_cache_alignment();
+
if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
is_swiotlb_allocated())
- return ARCH_KMALLOC_MINALIGN;
- return dma_get_cache_alignment();
+ minalign = ARCH_KMALLOC_MINALIGN;
+
+ return max(minalign, arch_slab_minalign());
}
void __init
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e52f486834eb..4bc70f459164 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2530,11 +2530,10 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
exit_swap_address_space(p->type);
inode = mapping->host;
- if (S_ISBLK(inode->i_mode)) {
- struct block_device *bdev = I_BDEV(inode);
-
- set_blocksize(bdev, old_block_size);
- blkdev_put(bdev, p);
+ if (p->bdev_handle) {
+ set_blocksize(p->bdev, old_block_size);
+ bdev_release(p->bdev_handle);
+ p->bdev_handle = NULL;
}
inode_lock(inode);
@@ -2764,13 +2763,14 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
int error;
if (S_ISBLK(inode->i_mode)) {
- p->bdev = blkdev_get_by_dev(inode->i_rdev,
+ p->bdev_handle = bdev_open_by_dev(inode->i_rdev,
BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL);
- if (IS_ERR(p->bdev)) {
- error = PTR_ERR(p->bdev);
- p->bdev = NULL;
+ if (IS_ERR(p->bdev_handle)) {
+ error = PTR_ERR(p->bdev_handle);
+ p->bdev_handle = NULL;
return error;
}
+ p->bdev = p->bdev_handle->bdev;
p->old_block_size = block_size(p->bdev);
error = set_blocksize(p->bdev, PAGE_SIZE);
if (error < 0)
@@ -3206,9 +3206,10 @@ bad_swap:
p->percpu_cluster = NULL;
free_percpu(p->cluster_next_cpu);
p->cluster_next_cpu = NULL;
- if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
+ if (p->bdev_handle) {
set_blocksize(p->bdev, p->old_block_size);
- blkdev_put(p->bdev, p);
+ bdev_release(p->bdev_handle);
+ p->bdev_handle = NULL;
}
inode = NULL;
destroy_swap_extents(p);
diff --git a/mm/zswap.c b/mm/zswap.c
index 083c693602b8..37d2b1cb2ecb 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1383,8 +1383,8 @@ reject:
shrink:
pool = zswap_pool_last_get();
- if (pool)
- queue_work(shrink_wq, &pool->shrink_work);
+ if (pool && !queue_work(shrink_wq, &pool->shrink_work))
+ zswap_pool_put(pool);
goto reject;
}
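The zswap hunk closes a reference leak: zswap_pool_last_get() takes a
reference on behalf of the shrink work, and queue_work() returns false
when the work item was already pending, in which case that reference must
be handed back. A toy model of the rule, with stand-in names throughout:

#include <stdbool.h>
#include <stdio.h>

struct pool { int refs; };

static void pool_get(struct pool *p) { p->refs++; }
static void pool_put(struct pool *p) { p->refs--; }

/* Same contract as queue_work(): true if newly queued, false if the
 * work was already pending. */
static bool fake_queue_work(bool already_pending)
{
	return !already_pending;
}

int main(void)
{
	struct pool p = { .refs = 1 };

	pool_get(&p);			/* reference for the work item */
	if (!fake_queue_work(true))	/* already queued... */
		pool_put(&p);		/* ...so return the reference */
	printf("refs = %d\n", p.refs);	/* back to 1: no leak */
	return 0;
}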
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a6f20338db8..73470cc3518a 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1627,6 +1627,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
return ERR_PTR(-EOPNOTSUPP);
}
+ /* Reject an outgoing connection to a device with the same BD_ADDR,
+ * guarding against CVE-2020-26555
+ */
+ if (!bacmp(&hdev->bdaddr, dst)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ dst);
+ return ERR_PTR(-ECONNREFUSED);
+ }
+
acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
if (!acl) {
acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
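bacmp() is a plain memcmp() over the six address bytes, so the
CVE-2020-26555 guard added above is a cheap equality test against the
controller's own address. A small runnable sketch of the check:

#include <stdio.h>
#include <string.h>

typedef struct { unsigned char b[6]; } bdaddr_t;

static int bacmp(const bdaddr_t *a, const bdaddr_t *b)
{
	return memcmp(a, b, sizeof(bdaddr_t));
}

int main(void)
{
	bdaddr_t own = { { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } };
	bdaddr_t dst = own;	/* spoofed peer reusing our BD_ADDR */

	if (!bacmp(&own, &dst))
		printf("reject: -ECONNREFUSED\n");
	return 0;
}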
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 31d02b54eea1..1e1c9147356c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -26,6 +26,8 @@
/* Bluetooth HCI event handling. */
#include <asm/unaligned.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -3268,6 +3270,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
+ /* Reject an incoming connection from a device with the same BD_ADDR,
+ * guarding against CVE-2020-26555
+ */
+ if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
+ bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+ &ev->bdaddr);
+ hci_reject_conn(hdev, &ev->bdaddr);
+ return;
+ }
+
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
&flags);
@@ -4742,6 +4754,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
if (!conn)
goto unlock;
+ /* Ignore a NULL link key, guarding against CVE-2020-26555 */
+ if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
+ bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
+ &ev->bdaddr);
+ hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
hci_conn_drop(conn);
@@ -5274,8 +5295,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* available, then do not declare that OOB data is
* present.
*/
- if (!memcmp(data->rand256, ZERO_KEY, 16) ||
- !memcmp(data->hash256, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash256, ZERO_KEY, 16))
return 0x00;
return 0x02;
@@ -5285,8 +5306,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
* not supported by the hardware, then check that if
* P-192 data values are present.
*/
- if (!memcmp(data->rand192, ZERO_KEY, 16) ||
- !memcmp(data->hash192, ZERO_KEY, 16))
+ if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
+ !crypto_memneq(data->hash192, ZERO_KEY, 16))
return 0x00;
return 0x01;
@@ -5303,7 +5324,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
hci_conn_hold(conn);
@@ -5550,7 +5571,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (!conn)
+ if (!conn || !hci_conn_ssp_enabled(conn))
goto unlock;
/* Reset the authentication requirement to unknown */
@@ -7021,6 +7042,14 @@ unlock:
hci_dev_unlock(hdev);
}
+static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
+{
+ u8 handle = PTR_UINT(data);
+
+ return hci_le_terminate_big_sync(hdev, handle,
+ HCI_ERROR_LOCAL_HOST_TERM);
+}
+
static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -7065,16 +7094,17 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
rcu_read_lock();
}
+ rcu_read_unlock();
+
if (!ev->status && !i)
/* If no BISes have been connected for the BIG,
* terminate. This is in case all bound connections
* have been closed before the BIG creation
* has completed.
*/
- hci_le_terminate_big_sync(hdev, ev->handle,
- HCI_ERROR_LOCAL_HOST_TERM);
+ hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
+ UINT_PTR(ev->handle), NULL);
- rcu_read_unlock();
hci_dev_unlock(hdev);
}
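The memcmp()-to-crypto_memneq() conversions matter because memcmp() may
return as soon as it finds the first differing byte, leaking timing
information about key material. crypto_memneq() XOR-accumulates every
byte before deciding. A userspace approximation of the idea (an
illustrative helper, not the kernel's implementation):

#include <stddef.h>
#include <stdio.h>

/* Returns nonzero if the buffers differ; the loop always touches all
 * n bytes, so the running time does not depend on where (or whether)
 * a mismatch occurs. */
static unsigned long ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned long diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];
	return diff;
}

int main(void)
{
	static const unsigned char zero_key[16];
	unsigned char link_key[16] = { 0 };

	/* !ct_memneq(...) is the "key equals ZERO_KEY" condition the
	 * hci_event.c hunks use to reject the link key. */
	printf("null key: %s\n", !ct_memneq(link_key, zero_key, 16) ?
	       "yes, reject" : "no");
	return 0;
}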
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5e4f718073b7..3e7cd330d731 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -488,7 +488,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
ni->type = hdev->dev_type;
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
- memcpy(ni->name, hdev->name, 8);
+ memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+ strnlen(hdev->name, sizeof(ni->name)), '\0');
opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
break;
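The create_monitor_event() fix replaces a fixed-length memcpy() of
hdev->name, which over-read adapter names shorter than the destination,
with memcpy_and_pad() plus strnlen(). A runnable sketch of those
semantics, using an illustrative local reimplementation of the helper:

#include <stdio.h>
#include <string.h>

/* Copy count bytes from src, then fill the rest of the dest_len-byte
 * destination with pad -- the behaviour of the kernel helper, with a
 * defensive clamp added for this sketch. */
static void my_memcpy_and_pad(void *dest, size_t dest_len,
			      const void *src, size_t count, int pad)
{
	if (count > dest_len)
		count = dest_len;
	memcpy(dest, src, count);
	memset((char *)dest + count, pad, dest_len - count);
}

int main(void)
{
	char name[8];
	const char *hdev_name = "hci0";	/* shorter than sizeof(name) */

	my_memcpy_and_pad(name, sizeof(name), hdev_name,
			  strnlen(hdev_name, sizeof(name)), '\0');
	printf("%.*s\n", (int)sizeof(name), name);
	return 0;
}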
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index d06e07a0ea5a..a15ab0b874a9 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -5369,6 +5369,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
{
int err = 0;
u16 handle = conn->handle;
+ bool disconnect = false;
struct hci_conn *c;
switch (conn->state) {
@@ -5399,24 +5400,15 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
hci_dev_unlock(hdev);
return 0;
case BT_BOUND:
- hci_dev_lock(hdev);
- hci_conn_failed(conn, reason);
- hci_dev_unlock(hdev);
- return 0;
+ break;
default:
- hci_dev_lock(hdev);
- conn->state = BT_CLOSED;
- hci_disconn_cfm(conn, reason);
- hci_conn_del(conn);
- hci_dev_unlock(hdev);
- return 0;
+ disconnect = true;
+ break;
}
hci_dev_lock(hdev);
- /* Check if the connection hasn't been cleanup while waiting
- * commands to complete.
- */
+ /* Check if the connection has been cleaned up concurrently */
c = hci_conn_hash_lookup_handle(hdev, handle);
if (!c || c != conn) {
err = 0;
@@ -5428,7 +5420,13 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
* or in case of LE it was still scanning, so it can be cleaned
* up safely.
*/
- hci_conn_failed(conn, reason);
+ if (disconnect) {
+ conn->state = BT_CLOSED;
+ hci_disconn_cfm(conn, reason);
+ hci_conn_del(conn);
+ } else {
+ hci_conn_failed(conn, reason);
+ }
unlock:
hci_dev_unlock(hdev);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 176eb5834746..103d46fa0eeb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -50,7 +50,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
-#include <linux/uio.h>
+#include <linux/iov_iter.h>
#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
@@ -61,6 +61,7 @@
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
+#include <crypto/hash.h>
/*
* Is a socket 'connection oriented' ?
@@ -489,6 +490,24 @@ short_copy:
return 0;
}
+static size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+#ifdef CONFIG_CRYPTO_HASH
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+#else
+ return 0;
+#endif
+}
+
/**
* skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
* and update a hash.
@@ -716,6 +735,60 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
+static __always_inline
+size_t copy_to_user_iter_csum(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ __wsum next, *csum = priv2;
+
+ next = csum_and_copy_to_user(from + progress, iter_to, len);
+ *csum = csum_block_add(*csum, next, progress);
+ return next ? 0 : len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_csum(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ __wsum *csum = priv2;
+ __wsum next = csum_partial_copy_nocheck(from, iter_to, len);
+
+ *csum = csum_block_add(*csum, next, progress);
+ return 0;
+}
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+static size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
+ struct iov_iter *i)
+{
+ struct csum_state *csstate = _csstate;
+ __wsum sum;
+
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
+ if (unlikely(iov_iter_is_discard(i))) {
+ // can't use csum_memcpy() for that one - data is not copied
+ csstate->csum = csum_block_add(csstate->csum,
+ csum_partial(addr, bytes, 0),
+ csstate->off);
+ csstate->off += bytes;
+ return bytes;
+ }
+
+ sum = csum_shift(csstate->csum, csstate->off);
+
+ bytes = iterate_and_advance2(i, bytes, (void *)addr, &sum,
+ copy_to_user_iter_csum,
+ memcpy_to_iter_csum);
+ csstate->csum = csum_shift(sum, csstate->off);
+ csstate->off += bytes;
+ return bytes;
+}
+
/**
* skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
* and update a checksum.
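The checksumming iterators above lean on two ones'-complement identities:
block sums can be merged into a running sum with csum_block_add(), and a
block that starts at an odd offset must have its partial sum byte-rotated
first, which csum_shift() does with ror32(sum, 8). The toy below
demonstrates the same alignment trick at 16-bit width; it models the
arithmetic only and is not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* Ones'-complement add with end-around carry. */
static uint16_t add16(uint16_t a, uint16_t b)
{
	uint32_t s = (uint32_t)a + b;

	return (uint16_t)((s & 0xffff) + (s >> 16));
}

/* 16-bit sum over big-endian words, zero-padding an odd tail byte --
 * a toy stand-in for csum_partial(). */
static uint16_t ocsum(const uint8_t *p, size_t len)
{
	uint16_t s = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		s = add16(s, (uint16_t)(p[i] << 8 | p[i + 1]));
	if (len & 1)
		s = add16(s, (uint16_t)(p[len - 1] << 8));
	return s;
}

/* Merge a block's standalone sum: a block at an odd offset has its
 * bytes in the opposite lanes, so swap the partial sum's bytes. */
static uint16_t block_add16(uint16_t sum, uint16_t part, size_t off)
{
	if (off & 1)
		part = (uint16_t)(part << 8 | part >> 8);
	return add16(sum, part);
}

int main(void)
{
	const uint8_t msg[] = "incremental-checksums";
	size_t n = sizeof(msg) - 1, split = 5;	/* odd split point */

	printf("whole=%04x combined=%04x\n", ocsum(msg, n),
	       block_add16(ocsum(msg, split),
			   ocsum(msg + split, n - split), split));
	return 0;
}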
diff --git a/net/core/dev.c b/net/core/dev.c
index 5aaf5753d4e4..9f3f8930c691 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -345,7 +345,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
list_del(&name_node->list);
- netdev_name_node_del(name_node);
kfree(name_node->name);
netdev_name_node_free(name_node);
}
@@ -364,6 +363,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
if (name_node == dev->name_node || name_node->dev != dev)
return -EINVAL;
+ netdev_name_node_del(name_node);
+ synchronize_rcu();
__netdev_name_node_alt_destroy(name_node);
return 0;
@@ -380,6 +381,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
+ struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
@@ -390,6 +392,10 @@ static void list_netdevice(struct net_device *dev)
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
write_unlock(&dev_base_lock);
+
+ netdev_for_each_altname(dev, name_node)
+ netdev_name_node_add(net, name_node);
+
/* We reserved the ifindex, this can't fail */
WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
@@ -401,12 +407,16 @@ static void list_netdevice(struct net_device *dev)
*/
static void unlist_netdevice(struct net_device *dev, bool lock)
{
+ struct netdev_name_node *name_node;
struct net *net = dev_net(dev);
ASSERT_RTNL();
xa_erase(&net->dev_by_index, dev->ifindex);
+ netdev_for_each_altname(dev, name_node)
+ netdev_name_node_del(name_node);
+
/* Unlink dev from the device chain */
if (lock)
write_lock(&dev_base_lock);
@@ -1086,7 +1096,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
for_each_netdev(net, d) {
struct netdev_name_node *name_node;
- list_for_each_entry(name_node, &d->name_node->list, list) {
+
+ netdev_for_each_altname(d, name_node) {
if (!sscanf(name_node->name, name, &i))
continue;
if (i < 0 || i >= max_netdevices)
@@ -1123,6 +1134,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
return -ENFILE;
}
+static int dev_prep_valid_name(struct net *net, struct net_device *dev,
+ const char *want_name, char *out_name)
+{
+ int ret;
+
+ if (!dev_valid_name(want_name))
+ return -EINVAL;
+
+ if (strchr(want_name, '%')) {
+ ret = __dev_alloc_name(net, want_name, out_name);
+ return ret < 0 ? ret : 0;
+ } else if (netdev_name_in_use(net, want_name)) {
+ return -EEXIST;
+ } else if (out_name != want_name) {
+ strscpy(out_name, want_name, IFNAMSIZ);
+ }
+
+ return 0;
+}
+
static int dev_alloc_name_ns(struct net *net,
struct net_device *dev,
const char *name)
@@ -1160,19 +1191,13 @@ EXPORT_SYMBOL(dev_alloc_name);
static int dev_get_valid_name(struct net *net, struct net_device *dev,
const char *name)
{
- BUG_ON(!net);
-
- if (!dev_valid_name(name))
- return -EINVAL;
-
- if (strchr(name, '%'))
- return dev_alloc_name_ns(net, dev, name);
- else if (netdev_name_in_use(net, name))
- return -EEXIST;
- else if (dev->name != name)
- strscpy(dev->name, name, IFNAMSIZ);
+ char buf[IFNAMSIZ];
+ int ret;
- return 0;
+ ret = dev_prep_valid_name(net, dev, name, buf);
+ if (ret >= 0)
+ strscpy(dev->name, buf, IFNAMSIZ);
+ return ret;
}
/**
@@ -11037,7 +11062,9 @@ EXPORT_SYMBOL(unregister_netdev);
int __dev_change_net_namespace(struct net_device *dev, struct net *net,
const char *pat, int new_ifindex)
{
+ struct netdev_name_node *name_node;
struct net *net_old = dev_net(dev);
+ char new_name[IFNAMSIZ] = {};
int err, new_nsid;
ASSERT_RTNL();
@@ -11064,10 +11091,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- err = dev_get_valid_name(net, dev, pat);
+ err = dev_prep_valid_name(net, dev, pat, new_name);
if (err < 0)
goto out;
}
+ /* Check that none of the altnames conflicts. */
+ err = -EEXIST;
+ netdev_for_each_altname(dev, name_node)
+ if (netdev_name_in_use(net, name_node->name))
+ goto out;
/* Check that new_ifindex isn't used yet. */
if (new_ifindex) {
@@ -11135,6 +11167,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
netdev_adjacent_add_links(dev);
+ if (new_name[0]) /* Rename the netdev to prepared name */
+ strscpy(dev->name, new_name, IFNAMSIZ);
+
/* Fixup kobjects */
err = device_rename(&dev->dev, dev->name);
WARN_ON(err);
diff --git a/net/core/dev.h b/net/core/dev.h
index e075e198092c..fa2e9c5c4122 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -62,6 +62,9 @@ struct netdev_name_node {
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_change_name(struct net_device *dev, const char *newname);
+#define netdev_for_each_altname(dev, namenode) \
+ list_for_each_entry((namenode), &(dev)->name_node->list, list)
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c09f091cbff..df81c1f0a570 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
- int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
}
}
- tbl->last_flush = jiffies;
+ WRITE_ONCE(tbl->last_flush, jiffies);
write_unlock_bh(&tbl->lock);
@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
- int entries;
+ int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
- if (entries >= tbl->gc_thresh3 ||
- (entries >= tbl->gc_thresh2 &&
- time_after(now, tbl->last_flush + 5 * HZ))) {
- if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3) {
+ gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+ if (entries >= gc_thresh3 ||
+ (entries >= READ_ONCE(tbl->gc_thresh2) &&
+ time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
- tbl->last_rand = jiffies;
+
+ WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
- nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
- nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
- nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
- nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+ nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+ NDTA_PAD) ||
+ nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+ nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+ nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
- long flush_delta = now - tbl->last_flush;
- long rand_delta = now - tbl->last_rand;
+ long flush_delta = now - READ_ONCE(tbl->last_flush);
+ long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
- .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock();
@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
- ndst.ndts_allocs += st->allocs;
- ndst.ndts_destroys += st->destroys;
- ndst.ndts_hash_grows += st->hash_grows;
- ndst.ndts_res_failed += st->res_failed;
- ndst.ndts_lookups += st->lookups;
- ndst.ndts_hits += st->hits;
- ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
- ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
- ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
- ndst.ndts_forced_gc_runs += st->forced_gc_runs;
- ndst.ndts_table_fulls += st->table_fulls;
+ ndst.ndts_allocs += READ_ONCE(st->allocs);
+ ndst.ndts_destroys += READ_ONCE(st->destroys);
+ ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
+ ndst.ndts_res_failed += READ_ONCE(st->res_failed);
+ ndst.ndts_lookups += READ_ONCE(st->lookups);
+ ndst.ndts_hits += READ_ONCE(st->hits);
+ ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
+ ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
+ ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
+ ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
+ ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
- tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+ WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
- tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+ WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
- tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+ WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
- tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+ WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;
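The neighbour.c annotations establish the usual lockless pairing: fields
that are read without the table lock are updated with WRITE_ONCE() and
sampled with READ_ONCE(), so the compiler cannot tear, cache, or re-read
them. A GNU C harness with the same macros (this mirrors kernel practice;
strictly conforming C11 code would use relaxed atomics instead):

#include <pthread.h>
#include <stdio.h>

#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int gc_thresh2 = 512;	/* stand-in for tbl->gc_thresh2 */

static void *reader(void *arg)
{
	(void)arg;
	/* One untorn sample; concurrent writers are tolerated. */
	printf("thresh2 = %d\n", READ_ONCE(gc_thresh2));
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	WRITE_ONCE(gc_thresh2, 1024);	/* the neightbl_set() side */
	pthread_join(t, NULL);
	return 0;
}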
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index f56b8d697014..4d1696677c48 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_puts(seq, " Flags: ");
for (i = 0; i < NR_PKT_FLAGS; i++) {
- if (i == F_FLOW_SEQ)
+ if (i == FLOW_SEQ_SHIFT)
if (!pkt_dev->cflows)
continue;
- if (pkt_dev->flags & (1 << i))
+ if (pkt_dev->flags & (1 << i)) {
seq_printf(seq, "%s ", pkt_flag_names[i]);
- else if (i == F_FLOW_SEQ)
- seq_puts(seq, "FLOW_RND ");
-
#ifdef CONFIG_XFRM
- if (i == F_IPSEC && pkt_dev->spi)
- seq_printf(seq, "spi:%u", pkt_dev->spi);
+ if (i == IPSEC_SHIFT && pkt_dev->spi)
+ seq_printf(seq, "spi:%u ", pkt_dev->spi);
#endif
+ } else if (i == FLOW_SEQ_SHIFT) {
+ seq_puts(seq, "FLOW_RND ");
+ }
}
seq_puts(seq, "\n");
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4a2ec33bfb51..53c377d054f0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -5503,13 +5503,11 @@ static unsigned int
rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
enum netdev_offload_xstats_type type)
{
- bool enabled = netdev_offload_xstats_enabled(dev, type);
-
return nla_total_size(0) +
/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
nla_total_size(sizeof(u8)) +
/* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
- (enabled ? nla_total_size(sizeof(u8)) : 0) +
+ nla_total_size(sizeof(u8)) +
0;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4eaf7ed0d1f4..2bfa6a7ba244 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -62,6 +62,7 @@
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
+#include <linux/iov_iter.h>
#include <net/protocol.h>
#include <net/dst.h>
@@ -6931,3 +6932,42 @@ out:
return spliced ?: ret;
}
EXPORT_SYMBOL(skb_splice_from_iter);
+
+static __always_inline
+size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ __wsum *csum = priv2;
+ __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
+
+ *csum = csum_block_add(*csum, next, progress);
+ return 0;
+}
+
+static __always_inline
+size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ __wsum next, *csum = priv2;
+
+ next = csum_and_copy_from_user(iter_from, to + progress, len);
+ *csum = csum_block_add(*csum, next, progress);
+ return next ? 0 : len;
+}
+
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ size_t copied;
+
+ if (WARN_ON_ONCE(!i->data_source))
+ return false;
+ copied = iterate_and_advance2(i, bytes, addr, csum,
+ copy_from_user_iter_csum,
+ memcpy_from_iter_csum);
+ if (likely(copied == bytes))
+ return true;
+ iov_iter_revert(i, copied);
+ return false;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter_full);
diff --git a/net/core/stream.c b/net/core/stream.c
index f5c4e47df165..96fbcb9bbb30 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close);
*/
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
- int err = 0;
+ int ret, err = 0;
long vm_wait = 0;
long current_timeo = *timeo_p;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
- (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
- (sk_stream_memory_free(sk) &&
- !vm_wait), &wait);
+ ret = sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
+ (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+ (sk_stream_memory_free(sk) && !vm_wait),
+ &wait);
sk->sk_write_pending--;
+ if (ret < 0)
+ goto do_error;
if (vm_wait) {
vm_wait -= current_timeo;
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index 883ed9be81f9..0515d6604b3b 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -431,10 +431,8 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
ethnl_string_array_t names,
struct netlink_ext_ack *extack, bool *mod)
{
- u32 *orig_bitmap, *saved_bitmap = NULL;
struct nlattr *bit_attr;
bool no_mask;
- bool dummy;
int rem;
int ret;
@@ -450,22 +448,8 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
}
no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
- if (no_mask) {
- unsigned int nwords = DIV_ROUND_UP(nbits, 32);
- unsigned int nbytes = nwords * sizeof(u32);
-
- /* The bitmap size is only the size of the map part without
- * its mask part.
- */
- saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL);
- if (!saved_bitmap)
- return -ENOMEM;
- memcpy(saved_bitmap, bitmap, nbytes);
- ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy);
- orig_bitmap = saved_bitmap;
- } else {
- orig_bitmap = bitmap;
- }
+ if (no_mask)
+ ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
bool old_val, new_val;
@@ -474,14 +458,13 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) {
NL_SET_ERR_MSG_ATTR(extack, bit_attr,
"only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask,
names, extack);
if (ret < 0)
- goto out;
- old_val = orig_bitmap[idx / 32] & ((u32)1 << (idx % 32));
+ return ret;
+ old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32));
if (new_val != old_val) {
if (new_val)
bitmap[idx / 32] |= ((u32)1 << (idx % 32));
@@ -491,10 +474,7 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
}
}
- ret = 0;
-out:
- kfree(saved_bitmap);
- return ret;
+ return 0;
}
static int ethnl_compact_sanity_checks(unsigned int nbits,
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index d0bc1dd8e65a..80c7302692c7 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
}
EXPORT_SYMBOL(handshake_genl_put);
-/*
- * dup() a kernel socket for use as a user space file descriptor
- * in the current process. The kernel socket must have an
- * instatiated struct file.
- *
- * Implicit argument: "current()"
- */
-static int handshake_dup(struct socket *sock)
-{
- struct file *file;
- int newfd;
-
- file = get_file(sock->file);
- newfd = get_unused_fd_flags(O_CLOEXEC);
- if (newfd < 0) {
- fput(file);
- return newfd;
- }
-
- fd_install(newfd, file);
- return newfd;
-}
-
int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
@@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
goto out_status;
sock = req->hr_sk->sk_socket;
- fd = handshake_dup(sock);
+ fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
err = fd;
goto out_complete;
}
+
err = req->hr_proto->hp_accept(req, info, fd);
if (err) {
- fput(sock->file);
+ put_unused_fd(fd);
goto out_complete;
}
+ fd_install(fd, get_file(sock->file));
+
trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
return 0;
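The handshake fix reorders descriptor setup into the canonical kernel
pattern: reserve the fd number first, run the fallible work, and only pair
fd_install() -- which cannot be undone -- with success, releasing the
reservation with put_unused_fd() on every error path. A kernel-context
sketch of that shape (do_fallible_setup() is a hypothetical stand-in, and
the snippet is not standalone-compilable):

static int accept_and_expose(struct socket *sock)
{
	int fd, err;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* reserved, not yet visible */
	if (fd < 0)
		return fd;

	err = do_fallible_setup(fd);		/* e.g. hp_accept() above */
	if (err) {
		put_unused_fd(fd);		/* release the reservation */
		return err;
	}

	fd_install(fd, get_file(sock->file));	/* point of no return */
	return fd;
}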
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3d2e30e20473..2713c9b06c4c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -597,7 +597,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
- sk->sk_wait_pending++;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -613,7 +612,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
- sk->sk_wait_pending--;
return timeo;
}
@@ -642,6 +640,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC) {
+ sk->sk_disconnects++;
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
goto out;
@@ -696,6 +695,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
tcp_sk(sk)->fastopen_req &&
tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+ int dis = sk->sk_disconnects;
/* Error code is set above */
if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
@@ -704,6 +704,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
+
+ if (dis != sk->sk_disconnects) {
+ err = -EPIPE;
+ goto out;
+ }
}
/* Connection was closed by RST, timeout, ICMP error
@@ -725,6 +730,7 @@ out:
sock_error:
err = sock_error(sk) ? : -ECONNABORTED;
sock->state = SS_UNCONNECTED;
+ sk->sk_disconnects++;
if (sk->sk_prot->disconnect(sk, flags))
sock->state = SS_DISCONNECTING;
goto out;
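sk_disconnects acts as a generation counter: __inet_stream_connect()
snapshots it before sleeping, and a changed value on wakeup means a
concurrent disconnect tore the connection state down while we waited, so
the connect must fail with -EPIPE instead of trusting stale state. A toy
model of the pattern, names illustrative:

#include <stdio.h>

struct fake_sock { int disconnects; };

static void do_disconnect(struct fake_sock *sk) { sk->disconnects++; }

static int wait_for_connect(struct fake_sock *sk, int racing_disconnect)
{
	int dis = sk->disconnects;	/* snapshot before blocking */

	if (racing_disconnect)		/* another thread sneaks in */
		do_disconnect(sk);

	if (dis != sk->disconnects)	/* generation moved: give up */
		return -32;		/* -EPIPE on Linux */
	return 0;
}

int main(void)
{
	struct fake_sock sk = { 0 };

	printf("quiet: %d, raced: %d\n",
	       wait_for_connect(&sk, 0), wait_for_connect(&sk, 1));
	return 0;
}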
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 2be2d4922557..4ccfc104f13a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
@@ -784,7 +786,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 1ea82bc33ef1..5eb1b8d302bb 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
unsigned char scope)
{
struct fib_nh *nh;
+ __be32 saddr;
if (nhc->nhc_family != AF_INET)
return inet_select_addr(nhc->nhc_dev, 0, scope);
nh = container_of(nhc, struct fib_nh, nh_common);
- nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+ saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
- return nh->nh_saddr;
+ WRITE_ONCE(nh->nh_saddr, saddr);
+ WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
+
+ return saddr;
}
__be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
- if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
- return nh->nh_saddr;
+ if (READ_ONCE(nh->nh_saddr_genid) ==
+ atomic_read(&net->ipv4.dev_addr_genid))
+ return READ_ONCE(nh->nh_saddr);
}
return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index aeebe8816689..394a498c2823 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1145,7 +1145,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
if (newsk) {
struct inet_connection_sock *newicsk = inet_csk(newsk);
- newsk->sk_wait_pending = 0;
inet_sk_set_state(newsk, TCP_SYN_RECV);
newicsk->icsk_bind_hash = NULL;
newicsk->icsk_bind2_hash = NULL;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c32f5e28758b..598c1b114d2c 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -149,8 +149,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb2->family)
- return false;
+ if (sk->sk_family != tb2->family) {
+ if (sk->sk_family == AF_INET)
+ return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
+ tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
+ return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
+ sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
+ }
if (sk->sk_family == AF_INET6)
return ipv6_addr_equal(&tb2->v6_rcv_saddr,
@@ -819,19 +825,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
tb->l3mdev != l3mdev)
return false;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family != tb->family) {
- if (sk->sk_family == AF_INET)
- return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
- tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
-
- return false;
- }
-
- if (sk->sk_family == AF_INET6)
- return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
-#endif
- return tb->rcv_saddr == sk->sk_rcv_saddr;
+ return inet_bind2_bucket_addr_match(tb, sk);
}
bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
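The bucket-matching rework makes both directions of the comparison handle
v4-mapped addresses (::ffff:a.b.c.d), where an IPv4 address is carried in
the last four bytes of an IPv6 address. An illustrative userspace version
of the test (the kernel uses ipv6_addr_v4mapped()):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static int addr_v4mapped(const struct in6_addr *a)
{
	/* Ten zero bytes followed by 0xff 0xff, then the IPv4 address. */
	static const unsigned char prefix[12] =
		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };

	return memcmp(a, prefix, sizeof(prefix)) == 0;
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a);
	printf("v4-mapped: %d\n", addr_v4mapped(&a));
	return 0;
}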
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3f66cdeef7de..3d3a24f79573 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -831,7 +831,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
*/
if (!skb_queue_empty(&sk->sk_receive_queue))
break;
- sk_wait_data(sk, &timeo, NULL);
+ ret = sk_wait_data(sk, &timeo, NULL);
+ if (ret < 0)
+ break;
if (signal_pending(current)) {
ret = sock_intr_errno(timeo);
break;
@@ -925,10 +927,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
return mss_now;
}
-/* In some cases, both sendmsg() could have added an skb to the write queue,
- * but failed adding payload on it. We need to remove it to consume less
+/* In some cases, sendmsg() could have added an skb to the write queue,
+ * but failed to add any payload to it. We need to remove it to consume less
* memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
- * epoll() users.
+ * epoll() users. Another reason is that tcp_write_xmit() does not like
+ * finding an empty skb in the write queue.
*/
void tcp_remove_empty_skb(struct sock *sk)
{
@@ -1287,6 +1290,7 @@ new_segment:
wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ tcp_remove_empty_skb(sk);
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -2442,7 +2446,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
__sk_flush_backlog(sk);
} else {
tcp_cleanup_rbuf(sk, copied);
- sk_wait_data(sk, &timeo, last);
+ err = sk_wait_data(sk, &timeo, last);
+ if (err < 0) {
+ err = copied ? : err;
+ goto out;
+ }
}
if ((flags & MSG_PEEK) &&
@@ -2966,12 +2974,6 @@ int tcp_disconnect(struct sock *sk, int flags)
int old_state = sk->sk_state;
u32 seq;
- /* Deny disconnect if other threads are blocked in sk_wait_event()
- * or inet_wait_for_connect().
- */
- if (sk->sk_wait_pending)
- return -EBUSY;
-
if (old_state != TCP_CLOSE)
tcp_set_state(sk, TCP_CLOSE);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 327268203001..53b0d62fd2c2 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -307,6 +307,10 @@ msg_bytes_ready:
}
data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data < 0) {
+ copied = data;
+ goto unlock;
+ }
if (data && !sk_psock_queue_empty(psock))
goto msg_bytes_ready;
copied = -EAGAIN;
@@ -317,6 +321,8 @@ out:
tcp_rcv_space_adjust(sk);
if (copied > 0)
__tcp_cleanup_rbuf(sk, copied);
+
+unlock:
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
@@ -351,6 +357,10 @@ msg_bytes_ready:
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
data = tcp_msg_wait_data(sk, psock, timeo);
+ if (data < 0) {
+ ret = data;
+ goto unlock;
+ }
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
@@ -361,6 +371,8 @@ msg_bytes_ready:
copied = -EAGAIN;
}
ret = copied;
+
+unlock:
release_sock(sk);
sk_psock_put(sk, psock);
return ret;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8afb0950a697..804821d6bd4d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2207,16 +2207,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
- if (flag & FLAG_SACK_RENEGING &&
- flag & FLAG_SND_UNA_ADVANCED) {
+ if (*ack_flag & FLAG_SACK_RENEGING &&
+ *ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@@ -2986,7 +2987,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
- if (tcp_check_sack_reneging(sk, flag))
+ if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 27140e5cdc06..4167e8a48b60 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1869,6 +1869,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TLS_DEVICE
tail->decrypted != skb->decrypted ||
#endif
+ !mptcp_skb_can_collapse(tail, skb) ||
thtail->doff != th->doff ||
memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
goto no_coalesce;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c8c42c280b7..f0723460753c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2542,6 +2542,18 @@ static bool tcp_pacing_check(struct sock *sk)
return true;
}
+static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
+{
+ const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
+
+ /* No skb in the rtx queue. */
+ if (!node)
+ return true;
+
+ /* Only one skb in rtx queue. */
+ return !node->rb_left && !node->rb_right;
+}
+
/* TCP Small Queues :
* Control number of packets in qdisc/devices to two packets / or ~1 ms.
* (These limits are doubled for retransmits)
@@ -2579,12 +2591,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit += extra_bytes;
}
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
- /* Always send skb if rtx queue is empty.
+ /* Always send skb if rtx queue is empty or has one skb.
* No need to wait for TX completion to call us back,
* after softirq/tasklet schedule.
* This helps when TX completions are delayed too much.
*/
- if (tcp_rtx_queue_empty(sk))
+ if (tcp_rtx_queue_empty_or_single_skb(sk))
return false;
set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
@@ -2788,7 +2800,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- u32 timeout, rto_delta_us;
+ u32 timeout, timeout_us, rto_delta_us;
int early_retrans;
/* Don't do any loss probe on a Fast Open connection before 3WHS
@@ -2812,11 +2824,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
* sample is available then probe after TCP_TIMEOUT_INIT.
*/
if (tp->srtt_us) {
- timeout = usecs_to_jiffies(tp->srtt_us >> 2);
+ timeout_us = tp->srtt_us >> 2;
if (tp->packets_out == 1)
- timeout += TCP_RTO_MIN;
+ timeout_us += tcp_rto_min_us(sk);
else
- timeout += TCP_TIMEOUT_MIN;
+ timeout_us += TCP_TIMEOUT_MIN_US;
+ timeout = usecs_to_jiffies(timeout_us);
} else {
timeout = TCP_TIMEOUT_INIT;
}
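tcp_rtx_queue_empty_or_single_skb() exploits the rbtree shape: "empty or
exactly one skb" reduces to "no root, or a root with neither child", an
O(1) test with no traversal or counting. The same check on a generic
binary tree:

#include <stdio.h>

struct node { struct node *left, *right; };

static int empty_or_single(const struct node *root)
{
	return !root || (!root->left && !root->right);
}

int main(void)
{
	struct node leaf = { 0, 0 };
	struct node root = { &leaf, 0 };

	printf("%d %d %d\n", empty_or_single(0),	/* empty: 1 */
	       empty_or_single(&leaf),			/* single: 1 */
	       empty_or_single(&root));			/* two nodes: 0 */
	return 0;
}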
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index acf4869c5d3b..bba10110fbbc 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
tp->rack.advanced = 0;
tcp_rack_detect_loss(sk, &timeout);
if (timeout) {
- timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
+ timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
timeout, inet_csk(sk)->icsk_rto);
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index fddd0cbdede1..2cc1a45742d8 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
skb->csum = csum_block_sub(skb->csum, csumdiff,
skb->len - trimlen);
}
- pskb_trim(skb, skb->len - trimlen);
+ ret = pskb_trim(skb, skb->len - trimlen);
+ if (unlikely(ret))
+ return ret;
ret = nexthdr[1];
@@ -831,7 +833,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 41a680c76d2e..42fb6996b077 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -117,10 +117,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- if (likely(xdst->u.rt6.rt6i_idev))
- in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
rt6_uncached_list_del(&xdst->u.rt6);
+ if (likely(xdst->u.rt6.rt6i_idev))
+ in6_dev_put(xdst->u.rt6.rt6i_idev);
xfrm_dst_destroy(xdst);
}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0665ff5e456e..a2db0585dce0 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -912,7 +912,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
ret = -EALREADY;
- goto unlock;
+ goto out;
}
key->local = sdata->local;
@@ -940,7 +940,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
out:
ieee80211_key_free_unused(key);
- unlock:
mutex_unlock(&sdata->local->key_mtx);
return ret;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index e751cda5eef6..8f6b6f56b65b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2468,8 +2468,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
/* drop unicast public action frames when using MPF */
if (is_unicast_ether_addr(mgmt->da) &&
- ieee80211_is_public_action((void *)rx->skb->data,
- rx->skb->len))
+ ieee80211_is_protected_dual_of_public_action(rx->skb))
return -EACCES;
}
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c3b83cb390d9..886ab689a8ae 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1298,7 +1298,7 @@ alloc_skb:
if (copy == 0) {
u64 snd_una = READ_ONCE(msk->snd_una);
- if (snd_una != msk->snd_nxt) {
+ if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
tcp_remove_empty_skb(ssk);
return 0;
}
@@ -1306,11 +1306,6 @@ alloc_skb:
zero_window_probe = true;
data_seq = snd_una - 1;
copy = 1;
-
- /* all mptcp-level data is acked, no skbs should be present into the
- * ssk write queue
- */
- WARN_ON_ONCE(reuse_skb);
}
copy = min_t(size_t, copy, info->limit - info->sent);
@@ -1339,7 +1334,6 @@ alloc_skb:
if (reuse_skb) {
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
mpext->data_len += copy;
- WARN_ON_ONCE(zero_window_probe);
goto out;
}
@@ -2354,6 +2348,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
#define MPTCP_CF_PUSH BIT(1)
#define MPTCP_CF_FASTCLOSE BIT(2)
+/* Be sure to send a reset only if the caller asked for it; also
+ * completely clean up the subflow status when the subflow reaches
+ * the TCP_CLOSE state.
+ */
+static void __mptcp_subflow_disconnect(struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ unsigned int flags)
+{
+ if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+ (flags & MPTCP_CF_FASTCLOSE)) {
+ /* The MPTCP code never waits on the subflow sockets, so a
+ * TCP-level disconnect should never fail
+ */
+ WARN_ON_ONCE(tcp_disconnect(ssk, 0));
+ mptcp_subflow_ctx_reset(subflow);
+ } else {
+ tcp_shutdown(ssk, SEND_SHUTDOWN);
+ }
+}
+
/* subflow sockets can be either outgoing (connect) or incoming
* (accept).
*
@@ -2391,7 +2405,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
- /* be sure to force the tcp_disconnect() path,
+ /* be sure to force the tcp_close() path
* to generate the egress reset
*/
ssk->sk_lingertime = 0;
@@ -2401,11 +2415,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
if (!dispose_it) {
- /* The MPTCP code never wait on the subflow sockets, TCP-level
- * disconnect should never fail
- */
- WARN_ON_ONCE(tcp_disconnect(ssk, 0));
- mptcp_subflow_ctx_reset(subflow);
+ __mptcp_subflow_disconnect(ssk, subflow, flags);
release_sock(ssk);
goto out;
@@ -3098,12 +3108,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- /* Deny disconnect if other threads are blocked in sk_wait_event()
- * or inet_wait_for_connect().
- */
- if (sk->sk_wait_pending)
- return -EBUSY;
-
/* We are on the fastopen error path. We can't call straight into the
* subflows cleanup code due to lock nesting (we are already under
* msk->firstsocket lock).
@@ -3173,7 +3177,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif
- nsk->sk_wait_pending = 0;
__mptcp_init_sock(nsk);
msk = mptcp_sk(nsk);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 1d34d700bd09..920a5a29ae1d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
-static bool nf_flow_is_outdated(const struct flow_offload *flow)
-{
- return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
- !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
-}
-
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
+static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
+ const struct flow_offload *flow)
+{
+ return flow_table->type->gc && flow_table->type->gc(flow);
+}
+
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
- nf_flow_is_outdated(flow))
+ nf_flow_custom_gc(flow_table, flow))
flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a72b6aeefb1b..29c651804cb2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3166,7 +3166,7 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
if (err < 0)
return err;
- if (!tb[NFTA_EXPR_DATA])
+ if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
return -EINVAL;
type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
@@ -5556,7 +5556,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- u64 timeout = 0;
nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
if (nest == NULL)
@@ -5592,15 +5591,11 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
htonl(*nft_set_ext_flags(ext))))
goto nla_put_failure;
- if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
- timeout = *nft_set_ext_timeout(ext);
- if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
- nf_jiffies64_to_msecs(timeout),
- NFTA_SET_ELEM_PAD))
- goto nla_put_failure;
- } else if (set->flags & NFT_SET_TIMEOUT) {
- timeout = READ_ONCE(set->timeout);
- }
+ if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+ nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+ nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
+ NFTA_SET_ELEM_PAD))
+ goto nla_put_failure;
if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
u64 expires, now = get_jiffies_64();
@@ -5615,9 +5610,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
nf_jiffies64_to_msecs(expires),
NFTA_SET_ELEM_PAD))
goto nla_put_failure;
-
- if (reset)
- *nft_set_ext_expiration(ext) = now + timeout;
}
if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
@@ -7615,6 +7607,16 @@ nla_put_failure:
return -1;
}
+static void audit_log_obj_reset(const struct nft_table *table,
+ unsigned int base_seq, unsigned int nentries)
+{
+ char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
+
+ audit_log_nfcfg(buf, table->family, nentries,
+ AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+ kfree(buf);
+}
+
struct nft_obj_filter {
char *table;
u32 type;
@@ -7629,8 +7631,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nftables_pernet *nft_net;
+ unsigned int entries = 0;
struct nft_object *obj;
bool reset = false;
+ int rc = 0;
if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
reset = true;
@@ -7643,6 +7647,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (family != NFPROTO_UNSPEC && family != table->family)
continue;
+ entries = 0;
list_for_each_entry_rcu(obj, &table->objects, list) {
if (!nft_is_active(net, obj))
goto cont;
@@ -7658,34 +7663,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
filter->type != NFT_OBJECT_UNSPEC &&
obj->ops->type->type != filter->type)
goto cont;
- if (reset) {
- char *buf = kasprintf(GFP_ATOMIC,
- "%s:%u",
- table->name,
- nft_net->base_seq);
-
- audit_log_nfcfg(buf,
- family,
- obj->handle,
- AUDIT_NFT_OP_OBJ_RESET,
- GFP_ATOMIC);
- kfree(buf);
- }
- if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWOBJ,
- NLM_F_MULTI | NLM_F_APPEND,
- table->family, table,
- obj, reset) < 0)
- goto done;
+ rc = nf_tables_fill_obj_info(skb, net,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWOBJ,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family, table,
+ obj, reset);
+ if (rc < 0)
+ break;
+ entries++;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
+ if (reset && entries)
+ audit_log_obj_reset(table, nft_net->base_seq, entries);
+ if (rc < 0)
+ break;
}
-done:
rcu_read_unlock();
cb->args[0] = idx;
@@ -7790,7 +7788,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
audit_log_nfcfg(buf,
family,
- obj->handle,
+ 1,
AUDIT_NFT_OP_OBJ_RESET,
GFP_ATOMIC);
kfree(buf);
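The dump path now batches its auditing: it counts the objects actually
emitted for each table and logs a single AUDIT_NFT_OP_OBJ_RESET record
carrying that count, where it previously logged one record per object
(keyed, misleadingly, by the object handle). A trivial sketch of the
batching shape, names illustrative:

#include <stdio.h>

static void audit_log_reset(const char *table, unsigned int entries)
{
	printf("audit: table=%s entries=%u\n", table, entries);
}

int main(void)
{
	unsigned int entries = 0;
	int i;

	for (i = 0; i < 5; i++)		/* dump five objects... */
		entries++;		/* ...logging nothing yet */

	if (entries)			/* one record per table */
		audit_log_reset("filter", entries);
	return 0;
}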
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 53c9e76473ba..f03f4d4d7d88 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -698,8 +698,8 @@ nfulnl_log_packet(struct net *net,
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
+ enum ip_conntrack_info ctinfo = 0;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info ctinfo;
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;
diff --git a/net/netfilter/nft_inner.c b/net/netfilter/nft_inner.c
index 28e2873ba24e..928312d01eb1 100644
--- a/net/netfilter/nft_inner.c
+++ b/net/netfilter/nft_inner.c
@@ -298,6 +298,7 @@ static int nft_inner_init(const struct nft_ctx *ctx,
int err;
if (!tb[NFTA_INNER_FLAGS] ||
+ !tb[NFTA_INNER_NUM] ||
!tb[NFTA_INNER_HDRSIZE] ||
!tb[NFTA_INNER_TYPE] ||
!tb[NFTA_INNER_EXPR])
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 120f6d395b98..0a689c8e0295 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -179,7 +179,7 @@ void nft_payload_eval(const struct nft_expr *expr,
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
- if (!skb_mac_header_was_set(skb))
+ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
goto err;
if (skb_vlan_tag_present(skb) &&
diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
index 25a75591583e..2e164a319945 100644
--- a/net/netfilter/nft_set_pipapo.h
+++ b/net/netfilter/nft_set_pipapo.h
@@ -147,7 +147,7 @@ struct nft_pipapo_match {
unsigned long * __percpu *scratch;
size_t bsize_max;
struct rcu_head rcu;
- struct nft_pipapo_field f[];
+ struct nft_pipapo_field f[] __counted_by(field_count);
};
/**
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 2660ceab3759..e34662f4a71e 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
nft_rbtree_interval_end(this)) {
parent = parent->rb_right;
continue;
+ } else if (nft_set_elem_expired(&rbe->ext)) {
+ break;
} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
parent = parent->rb_left;
continue;
diff --git a/net/nfc/nci/spi.c b/net/nfc/nci/spi.c
index 0935527d1d12..b68150c971d0 100644
--- a/net/nfc/nci/spi.c
+++ b/net/nfc/nci/spi.c
@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
int ret;
skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
/* add the NCI SPI header to the start of the buffer */
hdr = skb_push(skb, NCI_SPI_HDR_LEN);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 08630896b6c8..14cc8fe8584b 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1180,7 +1180,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
init_waitqueue_head(&data->read_wait);
mutex_lock(&rfkill_global_mutex);
- mutex_lock(&data->mtx);
/*
* start getting events from elsewhere but hold mtx to get
* startup events added first
@@ -1192,10 +1191,11 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
goto free;
rfkill_sync(rfkill);
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+ mutex_lock(&data->mtx);
list_add_tail(&ev->list, &data->events);
+ mutex_unlock(&data->mtx);
}
list_add(&data->list, &rfkill_fds);
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
file->private_data = data;
@@ -1203,7 +1203,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
return stream_open(inode, file);
free:
- mutex_unlock(&data->mtx);
mutex_unlock(&rfkill_global_mutex);
mutex_destroy(&data->mtx);
list_for_each_entry_safe(ev, tmp, &data->events, list)
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index e9d1b2f2ff0a..5a81505fba9a 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -108,13 +108,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
rfkill->clk = devm_clk_get(&pdev->dev, NULL);
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rfkill->reset_gpio = gpio;
- gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 7c652d14528b..fb52d6f9aff9 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -278,7 +278,16 @@ err_nat:
return err;
}
+static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+{
+ return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
+ test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
+ !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
+ !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+}
+
static struct nf_flowtable_type flowtable_ct = {
+ .gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
.owner = THIS_MODULE,
};
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3554085bc2be..880c5f16b29c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -902,6 +902,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
cl->cl_flags |= HFSC_USC;
}
+static void
+hfsc_upgrade_rt(struct hfsc_class *cl)
+{
+ cl->cl_fsc = cl->cl_rsc;
+ rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
+ cl->cl_flags |= HFSC_FSC;
+}
+
static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
[TCA_HFSC_RSC] = { .len = sizeof(struct tc_service_curve) },
[TCA_HFSC_FSC] = { .len = sizeof(struct tc_service_curve) },
@@ -1011,10 +1019,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (parent == NULL)
return -ENOENT;
}
- if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
- NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
- return -EINVAL;
- }
if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
return -EINVAL;
@@ -1065,6 +1069,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->cf_tree = RB_ROOT;
sch_tree_lock(sch);
+ /* Check if the inner class is a misconfigured 'rt' */
+ if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+ NL_SET_ERR_MSG(extack,
+ "Forced curve change on parent 'rt' to 'sc'");
+ hfsc_upgrade_rt(parent);
+ }
qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
list_add_tail(&cl->siblings, &parent->children);
if (parent->level == 0)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index bacdd971615e..35ddebae8894 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1201,6 +1201,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
(struct smc_clc_msg_accept_confirm_v2 *)aclc;
struct smc_clc_first_contact_ext *fce =
smc_get_clc_first_contact_ext(clc_v2, false);
+ struct net *net = sock_net(&smc->sk);
int rc;
if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
@@ -1210,7 +1211,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
ini->smcrv2.uses_gateway = false;
} else {
- if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+ if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
ini->smcrv2.nexthop_mac,
&ini->smcrv2.uses_gateway))
@@ -2361,7 +2362,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
smc_find_ism_store_rc(rc, ini);
return (!rc) ? 0 : ini->rc;
}
- return SMC_CLC_DECL_NOSMCDEV;
+ return prfx_rc;
}
/* listen worker: finish RDMA setup */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 9b66d6aeeb1a..89981dbe46c9 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -193,7 +193,7 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
u8 nexthop_mac[], u8 *uses_gateway)
{
struct neighbour *neigh = NULL;
@@ -205,7 +205,7 @@ int smc_ib_find_route(__be32 saddr, __be32 daddr,
if (daddr == cpu_to_be32(INADDR_NONE))
goto out;
- rt = ip_route_output_flow(&init_net, &fl4, NULL);
+ rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto out;
if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
@@ -235,6 +235,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
struct in_device *in_dev = __in_dev_get_rcu(ndev);
+ struct net *net = dev_net(ndev);
const struct in_ifaddr *ifa;
bool subnet_match = false;
@@ -248,7 +249,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
}
if (!subnet_match)
goto out;
- if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
+ if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr,
smcrv2->daddr,
smcrv2->nexthop_mac,
&smcrv2->uses_gateway))
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 4df5f8c8a0a1..ef8ac2b7546d 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -112,7 +112,7 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
unsigned short vlan_id, u8 gid[], u8 *sgid_index,
struct smc_init_info_smcrv2 *smcrv2);
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
u8 nexthop_mac[], u8 *uses_gateway);
bool smc_ib_is_valid_local_systemid(void);
int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
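The smc hunks thread a struct net through smc_ib_find_route() so the route lookup runs in the socket's own network namespace instead of hardcoded init_net. The callers derive it as sketched below (fragment, assuming sk or ndev is in scope):

	struct net *net = sock_net(sk);	/* or dev_net(ndev) in the RCU path */
	struct rtable *rt = ip_route_output_flow(net, &fl4, NULL);

	if (IS_ERR(rt))
		return PTR_ERR(rt);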
diff --git a/net/socket.c b/net/socket.c
index c4a6f5532955..5740475e084c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -403,7 +403,7 @@ static const struct xattr_handler sockfs_security_xattr_handler = {
.set = sockfs_security_xattr_set,
};
-static const struct xattr_handler *sockfs_xattr_handlers[] = {
+static const struct xattr_handler * const sockfs_xattr_handlers[] = {
&sockfs_xattr_handler,
&sockfs_security_xattr_handler,
NULL
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 65a6c6429a53..caa94cf57123 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
return NULL;
req->rq_xprt = xprt;
- INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */
if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
@@ -349,10 +348,8 @@ found:
}
/*
- * Add callback request to callback list. The callback
- * service sleeps on the sv_cb_waitq waiting for new
- * requests. Wake it up after adding enqueing the
- * request.
+ * Add callback request to callback list. Wake a thread
+ * on the first pool (usually the only pool) to handle it.
*/
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
@@ -369,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
dprintk("RPC: add callback request to list\n");
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
- wake_up(&bc_serv->sv_cb_waitq);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
}
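The backchannel now publishes completed requests on an lwq (lightweight lockless queue) and wakes one idle pool thread directly, replacing the sv_cb_lock/sv_cb_waitq pair. The producer/consumer pairing, condensed from the calls these hunks introduce:

	/* Producer (transport side): publish, then kick one idle thread. */
	lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);

	/* Consumer (svc thread side): drain one entry if present. */
	req = lwq_dequeue(&bc_serv->sv_cb_list, struct rpc_rqst, rq_bc_list);
	if (req)
		svc_process_bc(req, rqstp);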
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index f420d8457345..dcc2b4f49e77 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -472,7 +472,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
return NULL;
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
switch (mode & S_IFMT) {
case S_IFDIR:
inode->i_fop = &simple_dir_operations;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 812fda9d45dd..3f2ea7a0496f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -438,9 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
static void
__svc_init_bc(struct svc_serv *serv)
{
- INIT_LIST_HEAD(&serv->sv_cb_list);
- spin_lock_init(&serv->sv_cb_lock);
- init_waitqueue_head(&serv->sv_cb_waitq);
+ lwq_init(&serv->sv_cb_list);
}
#else
static void
@@ -509,9 +507,9 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_sockets);
+ lwq_init(&pool->sp_xprts);
INIT_LIST_HEAD(&pool->sp_all_threads);
- spin_lock_init(&pool->sp_lock);
+ init_llist_head(&pool->sp_idle_threads);
percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
@@ -575,11 +573,12 @@ svc_destroy(struct kref *ref)
timer_shutdown_sync(&serv->sv_temptimer);
/*
- * The last user is gone and thus all sockets have to be destroyed to
- * the point. Check this.
+ * Remaining transports at this point are not expected.
*/
- BUG_ON(!list_empty(&serv->sv_permsocks));
- BUG_ON(!list_empty(&serv->sv_tempsocks));
+ WARN_ONCE(!list_empty(&serv->sv_permsocks),
+ "SVC: permsocks remain for %s\n", serv->sv_program->pg_name);
+ WARN_ONCE(!list_empty(&serv->sv_tempsocks),
+ "SVC: tempsocks remain for %s\n", serv->sv_program->pg_name);
cache_clean_deferred(serv);
@@ -642,7 +641,6 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
folio_batch_init(&rqstp->rq_fbatch);
- __set_bit(RQ_BUSY, &rqstp->rq_flags);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
@@ -682,10 +680,13 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
serv->sv_nrthreads += 1;
spin_unlock_bh(&serv->sv_lock);
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads++;
+ atomic_inc(&pool->sp_nrthreads);
+
+ /* Protected by whatever lock the service uses when calling
+ * svc_set_num_threads()
+ */
list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
- spin_unlock_bh(&pool->sp_lock);
+
return rqstp;
}
@@ -701,23 +702,25 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
struct svc_rqst *rqstp;
+ struct llist_node *ln;
rcu_read_lock();
- list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
- if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
- continue;
-
+ ln = READ_ONCE(pool->sp_idle_threads.first);
+ if (ln) {
+ rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
WRITE_ONCE(rqstp->rq_qtime, ktime_get());
- wake_up_process(rqstp->rq_task);
+ if (!task_is_running(rqstp->rq_task)) {
+ wake_up_process(rqstp->rq_task);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ percpu_counter_inc(&pool->sp_threads_woken);
+ }
rcu_read_unlock();
- percpu_counter_inc(&pool->sp_threads_woken);
- trace_svc_wake_up(rqstp->rq_task->pid);
return;
}
rcu_read_unlock();
- set_bit(SP_CONGESTED, &pool->sp_flags);
}
+EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
static struct svc_pool *
svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
@@ -725,36 +728,38 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
-static struct task_struct *
-svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+static struct svc_pool *
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+ unsigned int *state)
{
+ struct svc_pool *pool;
unsigned int i;
- struct task_struct *task = NULL;
+
+retry:
+ pool = target_pool;
if (pool != NULL) {
- spin_lock_bh(&pool->sp_lock);
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
+ goto found_pool;
+ return NULL;
} else {
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
- spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_all_threads))
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
goto found_pool;
- spin_unlock_bh(&pool->sp_lock);
}
return NULL;
}
found_pool:
- if (!list_empty(&pool->sp_all_threads)) {
- struct svc_rqst *rqstp;
-
- rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
- set_bit(RQ_VICTIM, &rqstp->rq_flags);
- list_del_rcu(&rqstp->rq_all);
- task = rqstp->rq_task;
- }
- spin_unlock_bh(&pool->sp_lock);
- return task;
+ set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ set_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ if (!atomic_dec_and_test(&pool->sp_nrthreads))
+ return pool;
+ /* Nothing left in this pool any more */
+ clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ goto retry;
}
static int
@@ -795,18 +800,16 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- struct svc_rqst *rqstp;
- struct task_struct *task;
unsigned int state = serv->sv_nrthreads-1;
+ struct svc_pool *victim;
do {
- task = svc_pool_victim(serv, pool, &state);
- if (task == NULL)
+ victim = svc_pool_victim(serv, pool, &state);
+ if (!victim)
break;
- rqstp = kthread_data(task);
- /* Did we lose a race to svo_function threadfn? */
- if (kthread_stop(task) == -EINTR)
- svc_exit_thread(rqstp);
+ svc_pool_wake_idle_thread(victim);
+ wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS,
+ TASK_IDLE);
nrservs++;
} while (nrservs < 0);
return 0;
@@ -832,13 +835,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- if (pool == NULL) {
+ if (!pool)
nrservs -= serv->sv_nrthreads;
- } else {
- spin_lock_bh(&pool->sp_lock);
- nrservs -= pool->sp_nrthreads;
- spin_unlock_bh(&pool->sp_lock);
- }
+ else
+ nrservs -= atomic_read(&pool->sp_nrthreads);
if (nrservs > 0)
return svc_start_kthreads(serv, pool, nrservs);
@@ -924,11 +924,9 @@ svc_exit_thread(struct svc_rqst *rqstp)
struct svc_serv *serv = rqstp->rq_server;
struct svc_pool *pool = rqstp->rq_pool;
- spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads--;
- if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
- list_del_rcu(&rqstp->rq_all);
- spin_unlock_bh(&pool->sp_lock);
+ list_del_rcu(&rqstp->rq_all);
+
+ atomic_dec(&pool->sp_nrthreads);
spin_lock_bh(&serv->sv_lock);
serv->sv_nrthreads -= 1;
@@ -938,6 +936,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
svc_rqst_free(rqstp);
svc_put(serv);
+ /* That svc_put() cannot be the last, because the thread
+ * waiting for SP_VICTIM_REMAINS to clear must hold
+ * a reference. So it is still safe to access pool.
+ */
+ clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);
@@ -1544,24 +1547,20 @@ out_drop:
}
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-/*
- * Process a backchannel RPC request that arrived over an existing
- * outbound connection
+/**
+ * svc_process_bc - process a reverse-direction RPC request
+ * @req: RPC request to be used for client-side processing
+ * @rqstp: server-side execution context
+ *
*/
-int
-bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
- struct svc_rqst *rqstp)
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
{
struct rpc_task *task;
int proc_error;
- int error;
-
- dprintk("svc: %s(%p)\n", __func__, req);
/* Build the svc_rqst used by the common processing routine */
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
- rqstp->rq_server = serv;
rqstp->rq_bc_net = req->rq_xprt->xprt_net;
rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
@@ -1590,10 +1589,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
* been processed by the caller.
*/
svcxdr_init_decode(rqstp);
- if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) {
- error = -EINVAL;
- goto out;
- }
+ if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2))
+ return;
/* Parse and execute the bc call */
proc_error = svc_process_common(rqstp);
@@ -1602,26 +1599,18 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
if (!proc_error) {
/* Processing error: drop the request */
xprt_free_bc_request(req);
- error = -EINVAL;
- goto out;
+ return;
}
/* Finally, send the reply synchronously */
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req);
- if (IS_ERR(task)) {
- error = PTR_ERR(task);
- goto out;
- }
+ if (IS_ERR(task))
+ return;
WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
- error = task->tk_status;
rpc_put_task(task);
-
-out:
- dprintk("svc: %s(), error=%d\n", __func__, error);
- return error;
}
-EXPORT_SYMBOL_GPL(bc_svc_process);
+EXPORT_SYMBOL_GPL(svc_process_bc);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
/**
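The thread-shutdown path above replaces kthread_stop() with a flag-bit handshake: the controller nominates a pool, wakes an idle thread, and sleeps on SP_VICTIM_REMAINS; the exiting thread clears that bit only after its final svc_put(), so the pool stays valid for the waiter. The two sides, condensed from the hunks above:

	/* Controller (svc_stop_kthreads): nominate and wait. */
	set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
	set_bit(SP_NEED_VICTIM, &pool->sp_flags);
	svc_pool_wake_idle_thread(pool);
	wait_on_bit(&pool->sp_flags, SP_VICTIM_REMAINS, TASK_IDLE);

	/* Victim (svc_exit_thread), after dropping its server reference: */
	clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags);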
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 4cfe9640df48..fee83d1024bc 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -9,7 +9,6 @@
#include <linux/sched/mm.h>
#include <linux/errno.h>
#include <linux/freezer.h>
-#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/sunrpc/addr.h>
@@ -17,6 +16,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/bc_xprt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <trace/events/sunrpc.h>
@@ -201,7 +201,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
kref_init(&xprt->xpt_ref);
xprt->xpt_server = serv;
INIT_LIST_HEAD(&xprt->xpt_list);
- INIT_LIST_HEAD(&xprt->xpt_ready);
INIT_LIST_HEAD(&xprt->xpt_deferred);
INIT_LIST_HEAD(&xprt->xpt_users);
mutex_init(&xprt->xpt_mutex);
@@ -472,9 +471,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
pool = svc_pool_for_cpu(xprt->xpt_server);
percpu_counter_inc(&pool->sp_sockets_queued);
- spin_lock_bh(&pool->sp_lock);
- list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
- spin_unlock_bh(&pool->sp_lock);
+ lwq_enqueue(&xprt->xpt_ready, &pool->sp_xprts);
svc_pool_wake_idle_thread(pool);
}
@@ -487,18 +484,9 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
struct svc_xprt *xprt = NULL;
- if (list_empty(&pool->sp_sockets))
- goto out;
-
- spin_lock_bh(&pool->sp_lock);
- if (likely(!list_empty(&pool->sp_sockets))) {
- xprt = list_first_entry(&pool->sp_sockets,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ xprt = lwq_dequeue(&pool->sp_xprts, struct svc_xprt, xpt_ready);
+ if (xprt)
svc_xprt_get(xprt);
- }
- spin_unlock_bh(&pool->sp_lock);
-out:
return xprt;
}
@@ -674,7 +662,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
continue;
set_current_state(TASK_IDLE);
- if (kthread_should_stop()) {
+ if (svc_thread_should_stop(rqstp)) {
set_current_state(TASK_RUNNING);
return false;
}
@@ -699,7 +687,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp)
}
static bool
-rqst_should_sleep(struct svc_rqst *rqstp)
+svc_thread_should_sleep(struct svc_rqst *rqstp)
{
struct svc_pool *pool = rqstp->rq_pool;
@@ -708,65 +696,51 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return false;
/* was a socket queued? */
- if (!list_empty(&pool->sp_sockets))
+ if (!lwq_empty(&pool->sp_xprts))
return false;
/* are we shutting down? */
- if (kthread_should_stop())
+ if (svc_thread_should_stop(rqstp))
return false;
- /* are we freezing? */
- if (freezing(current))
- return false;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (svc_is_backchannel(rqstp)) {
+ if (!lwq_empty(&rqstp->rq_server->sv_cb_list))
+ return false;
+ }
+#endif
return true;
}
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
+static void svc_thread_wait_for_work(struct svc_rqst *rqstp)
{
- struct svc_pool *pool = rqstp->rq_pool;
-
- /* rq_xprt should be clear on entry */
- WARN_ON_ONCE(rqstp->rq_xprt);
-
- rqstp->rq_xprt = svc_xprt_dequeue(pool);
- if (rqstp->rq_xprt)
- goto out_found;
-
- set_current_state(TASK_IDLE);
- smp_mb__before_atomic();
- clear_bit(SP_CONGESTED, &pool->sp_flags);
- clear_bit(RQ_BUSY, &rqstp->rq_flags);
- smp_mb__after_atomic();
-
- if (likely(rqst_should_sleep(rqstp)))
- schedule();
- else
+ struct svc_pool *pool = rqstp->rq_pool;
+
+ if (svc_thread_should_sleep(rqstp)) {
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ llist_add(&rqstp->rq_idle, &pool->sp_idle_threads);
+ if (likely(svc_thread_should_sleep(rqstp)))
+ schedule();
+
+ while (!llist_del_first_this(&pool->sp_idle_threads,
+ &rqstp->rq_idle)) {
+ /* Work just became available. This thread can only
+ * handle it after removing rqstp from the idle
+ * list. If that attempt failed, some other thread
+ * must have queued itself after finding no
+ * work to do, so that thread has taken responsibility
+ * for this new work. This thread can safely sleep
+ * until woken again.
+ */
+ schedule();
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ }
__set_current_state(TASK_RUNNING);
-
+ } else {
+ cond_resched();
+ }
try_to_freeze();
-
- set_bit(RQ_BUSY, &rqstp->rq_flags);
- smp_mb__after_atomic();
- clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- rqstp->rq_xprt = svc_xprt_dequeue(pool);
- if (rqstp->rq_xprt)
- goto out_found;
-
- if (kthread_should_stop())
- return NULL;
- return NULL;
-out_found:
- clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- /* Normally we will wait up to 5 seconds for any required
- * cache information to be provided.
- */
- if (!test_bit(SP_CONGESTED, &pool->sp_flags))
- rqstp->rq_chandle.thread_wait = 5*HZ;
- else
- rqstp->rq_chandle.thread_wait = 1*HZ;
- trace_svc_xprt_dequeue(rqstp);
- return rqstp->rq_xprt;
}
static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
@@ -785,7 +759,7 @@ static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt
svc_xprt_received(newxpt);
}
-static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
+static void svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
struct svc_serv *serv = rqstp->rq_server;
int len = 0;
@@ -826,11 +800,35 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+ if (len <= 0)
+ goto out;
+
+ trace_svc_xdr_recvfrom(&rqstp->rq_arg);
+
+ clear_bit(XPT_OLD, &xprt->xpt_flags);
+
+ rqstp->rq_chandle.defer = svc_defer;
+
+ if (serv->sv_stats)
+ serv->sv_stats->netcnt++;
+ percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
+ rqstp->rq_stime = ktime_get();
+ svc_process(rqstp);
} else
svc_xprt_received(xprt);
out:
- return len;
+ rqstp->rq_res.len = 0;
+ svc_xprt_release(rqstp);
+}
+
+static void svc_thread_wake_next(struct svc_rqst *rqstp)
+{
+ if (!svc_thread_should_sleep(rqstp))
+ /* More work pending after I dequeued some,
+ * wake another worker
+ */
+ svc_pool_wake_idle_thread(rqstp->rq_pool);
}
/**
@@ -843,44 +841,51 @@ out:
*/
void svc_recv(struct svc_rqst *rqstp)
{
- struct svc_xprt *xprt = NULL;
- struct svc_serv *serv = rqstp->rq_server;
- int len;
+ struct svc_pool *pool = rqstp->rq_pool;
if (!svc_alloc_arg(rqstp))
- goto out;
+ return;
- try_to_freeze();
- cond_resched();
- if (kthread_should_stop())
- goto out;
+ svc_thread_wait_for_work(rqstp);
- xprt = svc_get_next_xprt(rqstp);
- if (!xprt)
- goto out;
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
- len = svc_handle_xprt(rqstp, xprt);
+ if (svc_thread_should_stop(rqstp)) {
+ svc_thread_wake_next(rqstp);
+ return;
+ }
- /* No data, incomplete (TCP) read, or accept() */
- if (len <= 0)
- goto out_release;
+ rqstp->rq_xprt = svc_xprt_dequeue(pool);
+ if (rqstp->rq_xprt) {
+ struct svc_xprt *xprt = rqstp->rq_xprt;
- trace_svc_xdr_recvfrom(&rqstp->rq_arg);
+ svc_thread_wake_next(rqstp);
+ /* Normally we will wait up to 5 seconds for any required
+ * cache information to be provided. When there are no
+ * idle threads, we reduce the wait time.
+ */
+ if (pool->sp_idle_threads.first)
+ rqstp->rq_chandle.thread_wait = 5 * HZ;
+ else
+ rqstp->rq_chandle.thread_wait = 1 * HZ;
- clear_bit(XPT_OLD, &xprt->xpt_flags);
+ trace_svc_xprt_dequeue(rqstp);
+ svc_handle_xprt(rqstp, xprt);
+ }
- rqstp->rq_chandle.defer = svc_defer;
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ if (svc_is_backchannel(rqstp)) {
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
- if (serv->sv_stats)
- serv->sv_stats->netcnt++;
- percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
- rqstp->rq_stime = ktime_get();
- svc_process(rqstp);
-out:
- return;
-out_release:
- rqstp->rq_res.len = 0;
- svc_xprt_release(rqstp);
+ req = lwq_dequeue(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ if (req) {
+ svc_thread_wake_next(rqstp);
+ svc_process_bc(req, rqstp);
+ }
+ }
+#endif
}
EXPORT_SYMBOL_GPL(svc_recv);
@@ -890,7 +895,6 @@ EXPORT_SYMBOL_GPL(svc_recv);
void svc_drop(struct svc_rqst *rqstp)
{
trace_svc_drop(rqstp);
- svc_xprt_release(rqstp);
}
EXPORT_SYMBOL_GPL(svc_drop);
@@ -906,8 +910,6 @@ void svc_send(struct svc_rqst *rqstp)
int status;
xprt = rqstp->rq_xprt;
- if (!xprt)
- return;
/* calculate over-all length */
xb = &rqstp->rq_res;
@@ -920,7 +922,6 @@ void svc_send(struct svc_rqst *rqstp)
status = xprt->xpt_ops->xpo_sendto(rqstp);
trace_svc_send(rqstp, status);
- svc_xprt_release(rqstp);
}
/*
@@ -1031,7 +1032,6 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
list_del_init(&xprt->xpt_list);
- WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
@@ -1082,36 +1082,26 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
return ret;
}
-static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
- struct svc_pool *pool;
struct svc_xprt *xprt;
- struct svc_xprt *tmp;
int i;
for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
-
- spin_lock_bh(&pool->sp_lock);
- list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
- if (xprt->xpt_net != net)
- continue;
- list_del_init(&xprt->xpt_ready);
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ struct svc_pool *pool = &serv->sv_pools[i];
+ struct llist_node *q, **t1, *t2;
+
+ q = lwq_dequeue_all(&pool->sp_xprts);
+ lwq_for_each_safe(xprt, t1, t2, &q, xpt_ready) {
+ if (xprt->xpt_net == net) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_delete_xprt(xprt);
+ xprt = NULL;
+ }
}
- spin_unlock_bh(&pool->sp_lock);
- }
- return NULL;
-}
-
-static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
-{
- struct svc_xprt *xprt;
- while ((xprt = svc_dequeue_net(serv, net))) {
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- svc_delete_xprt(xprt);
+ if (q)
+ lwq_enqueue_batch(q, &pool->sp_xprts);
}
}
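svc_clean_up_xprts() above uses the drain/filter/requeue idiom of the new lwq API: dequeue the whole queue, NULL out the cursor for nodes to drop, and batch-requeue the rest. A condensed sketch (should_drop() is a hypothetical predicate standing in for the netns match):

	struct llist_node *q, **t1, *t2;

	q = lwq_dequeue_all(&pool->sp_xprts);
	lwq_for_each_safe(xprt, t1, t2, &q, xpt_ready) {
		if (should_drop(xprt))
			xprt = NULL;	/* NULL-ing the cursor unlinks the node */
	}
	if (q)
		lwq_enqueue_batch(q, &pool->sp_xprts);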
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index e4d84a13c566..8c817e755262 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -263,11 +263,9 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Queue rqst for ULP's callback service */
bc_serv = xprt->bc_serv;
xprt_get(xprt);
- spin_lock(&bc_serv->sv_cb_lock);
- list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- spin_unlock(&bc_serv->sv_cb_lock);
+ lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
- wake_up(&bc_serv->sv_cb_waitq);
+ svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
r_xprt->rx_stats.bcall_count++;
return;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 85c8bcaebb80..3b05f90a3e50 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -852,7 +852,8 @@ out_readfail:
if (ret == -EINVAL)
svc_rdma_send_error(rdma_xprt, ctxt, ret);
svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
- return ret;
+ svc_xprt_deferred_close(xprt);
+ return -ENOTCONN;
out_backchannel:
svc_rdma_handle_bc_reply(rqstp, ctxt);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 02f583ff9239..002483e60c19 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -139,8 +139,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
- int rc = 0;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret, rc = 0;
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
@@ -154,9 +154,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
break;
}
- if (sk_wait_event(sk, timeo,
- !READ_ONCE(sk->sk_write_pending), &wait))
+ ret = sk_wait_event(sk, timeo,
+ !READ_ONCE(sk->sk_write_pending), &wait);
+ if (ret) {
+ if (ret < 0)
+ rc = ret;
break;
+ }
}
remove_wait_queue(sk_sleep(sk), &wait);
return rc;
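Both tls hunks apply the same fix: sk_wait_event() can return a negative error, and discarding it let failed waits look like success. The pattern, as a fragment (condition_holds() is illustrative):

	ret = sk_wait_event(sk, &timeo, condition_holds(sk), &wait);
	if (ret < 0)
		return ret;	/* the wait itself failed; don't report success */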
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d1fc295b83b5..e9d1e83a859d 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1291,6 +1291,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
long timeo;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1302,6 +1303,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
if (sk->sk_err)
return sock_error(sk);
+ if (ret < 0)
+ return ret;
+
if (!skb_queue_empty(&sk->sk_receive_queue)) {
tls_strp_check_rcv(&ctx->strp);
if (tls_strp_msg_ready(ctx))
@@ -1320,10 +1324,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
released = true;
add_wait_queue(sk_sleep(sk), &wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- sk_wait_event(sk, &timeo,
- tls_strp_msg_ready(ctx) ||
- !sk_psock_queue_empty(psock),
- &wait);
+ ret = sk_wait_event(sk, &timeo,
+ tls_strp_msg_ready(ctx) ||
+ !sk_psock_queue_empty(psock),
+ &wait);
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
remove_wait_queue(sk_sleep(sk), &wait);
@@ -1852,6 +1856,7 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
bool nonblock)
{
long timeo;
+ int ret;
timeo = sock_rcvtimeo(sk, nonblock);
@@ -1861,14 +1866,16 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
ctx->reader_contended = 1;
add_wait_queue(&ctx->wq, &wait);
- sk_wait_event(sk, &timeo,
- !READ_ONCE(ctx->reader_present), &wait);
+ ret = sk_wait_event(sk, &timeo,
+ !READ_ONCE(ctx->reader_present), &wait);
remove_wait_queue(&ctx->wq, &wait);
if (timeo <= 0)
return -EAGAIN;
if (signal_pending(current))
return sock_intr_errno(timeo);
+ if (ret < 0)
+ return ret;
}
WRITE_ONCE(ctx->reader_present, 1);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index e95df847176b..b80bf681327b 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -555,6 +555,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
virtio_device_ready(vdev);
+ return 0;
+}
+
+static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+{
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
mutex_unlock(&vsock->tx_lock);
@@ -569,7 +574,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
- return 0;
+ /* virtio_transport_send_pkt() can queue packets once
+ * the_virtio_vsock is set, but they won't be processed until
+ * vsock->tx_run is set to true. We queue vsock->send_pkt_work
+ * when initialization finishes to send those packets queued
+ * earlier.
+ * We don't need to queue the other workers (rx, event) because
+ * as long as we don't fill the queues with empty buffers, the
+ * host can't send us any notification.
+ */
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
@@ -664,6 +678,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -736,6 +751,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
out:
mutex_unlock(&the_virtio_vsock_mutex);
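Splitting virtio_vsock_vqs_init() from the new virtio_vsock_vqs_start() lets probe publish the_virtio_vsock first and only then mark the workers runnable; the final queue_work() flushes any packets senders queued in that window. The resulting bring-up order, condensed from the probe hunk:

	ret = virtio_vsock_vqs_init(vsock);	/* allocate vqs, virtio_device_ready() */
	if (ret)
		goto out;
	rcu_assign_pointer(the_virtio_vsock, vsock);	/* senders may start queueing here */
	virtio_vsock_vqs_start(vsock);		/* set *_run, kick send_pkt_work */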
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 64e861617110..acec41c1809a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1622,7 +1622,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
list_add_tail(&work->entry, &rdev->wiphy_work_list);
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
}
EXPORT_SYMBOL_GPL(wiphy_work_queue);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 3e2c398abddc..55a1d3633853 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
cr.links[link_id].status = data->links[link_id].status;
+ cr.links[link_id].bss = data->links[link_id].bss;
+
WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
(!cr.ap_mld_addr || !cr.links[link_id].bss));
- cr.links[link_id].bss = data->links[link_id].bss;
if (!cr.links[link_id].bss)
continue;
cr.links[link_id].bssid = data->links[link_id].bss->bssid;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 939deecf0bbe..8210a6090ac1 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2125,7 +2125,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!res)
goto drop;
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
+ rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
if (data->bss_source == BSS_SOURCE_MBSSID) {
/* this is a nontransmitting bss, we need to add it to
diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
index b86474084690..e21cc71095bb 100644
--- a/net/xfrm/xfrm_interface_core.c
+++ b/net/xfrm/xfrm_interface_core.c
@@ -380,8 +380,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
skb->dev = dev;
if (err) {
- dev->stats.rx_errors++;
- dev->stats.rx_dropped++;
+ DEV_STATS_INC(dev, rx_errors);
+ DEV_STATS_INC(dev, rx_dropped);
return 0;
}
@@ -426,7 +426,6 @@ static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
unsigned int length = skb->len;
struct net_device *tdev;
@@ -473,7 +472,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
tdev = dst->dev;
if (tdev == dev) {
- stats->collisions++;
+ DEV_STATS_INC(dev, collisions);
net_warn_ratelimited("%s: Local routing loop detected!\n",
dev->name);
goto tx_err_dst_release;
@@ -512,13 +511,13 @@ xmit:
if (net_xmit_eval(err) == 0) {
dev_sw_netstats_tx_add(dev, 1, length);
} else {
- stats->tx_errors++;
- stats->tx_aborted_errors++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
}
return 0;
tx_err_link_failure:
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_link_failure(skb);
tx_err_dst_release:
dst_release(dst);
@@ -528,7 +527,6 @@ tx_err_dst_release:
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
- struct net_device_stats *stats = &xi->dev->stats;
struct dst_entry *dst = skb_dst(skb);
struct flowi fl;
int ret;
@@ -545,7 +543,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
if (dst->error) {
dst_release(dst);
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, dst);
@@ -561,7 +559,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
if (IS_ERR(rt)) {
- stats->tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
goto tx_err;
}
skb_dst_set(skb, &rt->dst);
@@ -580,8 +578,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
tx_err:
- stats->tx_errors++;
- stats->tx_dropped++;
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_dropped);
kfree_skb(skb);
return NETDEV_TX_OK;
}
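DEV_STATS_INC() updates the core net_device stats with atomic_long operations, so concurrent TX/RX paths cannot lose counts the way the old plain stats->field++ read-modify-write could. Usage fragment (assuming the matching DEV_STATS_READ() helper is available):

	DEV_STATS_INC(dev, tx_errors);	/* replaces stats->tx_errors++ */
	pr_debug("tx_errors=%lu\n",
		 (unsigned long)DEV_STATS_READ(dev, tx_errors));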
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d6b405782b63..d24b4d4f620e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -851,7 +851,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
struct hlist_node *newpos = NULL;
bool matches_s, matches_d;
- if (!policy->bydst_reinsert)
+ if (policy->walk.dead || !policy->bydst_reinsert)
continue;
WARN_ON_ONCE(policy->family != family);
@@ -1256,8 +1256,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
struct xfrm_pol_inexact_bin *bin;
u8 dbits, sbits;
+ if (policy->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(policy->index);
- if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
+ if (dir >= XFRM_POLICY_MAX)
continue;
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
@@ -1372,8 +1375,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
* of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
- static u32 idx_generator;
-
for (;;) {
struct hlist_head *list;
struct xfrm_policy *p;
@@ -1381,8 +1382,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
int found;
if (!index) {
- idx = (idx_generator | dir);
- idx_generator += 8;
+ idx = (net->xfrm.idx_generator | dir);
+ net->xfrm.idx_generator += 8;
} else {
idx = index;
index = 0;
@@ -1823,9 +1824,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(pol->index);
- if (pol->walk.dead ||
- dir >= XFRM_POLICY_MAX ||
+ if (dir >= XFRM_POLICY_MAX ||
pol->type != type)
continue;
@@ -1862,9 +1865,11 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
again:
list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead)
+ continue;
+
dir = xfrm_policy_id2dir(pol->index);
- if (pol->walk.dead ||
- dir >= XFRM_POLICY_MAX ||
+ if (dir >= XFRM_POLICY_MAX ||
pol->xdo.dev != dev)
continue;
@@ -3215,7 +3220,7 @@ no_transform:
}
for (i = 0; i < num_pols; i++)
- pols[i]->curlft.use_time = ktime_get_real_seconds();
+ WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
if (num_xfrms < 0) {
/* Prohibit the flow */
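Moving idx_generator out of the function-static into struct netns_xfrm makes policy-index allocation per-namespace, so namespaces no longer share (and race on) one global counter. A sketch of the allocation step, wrapped in a hypothetical helper for clarity:

	/* Hypothetical helper; the real code inlines this in xfrm_gen_index(). */
	static u32 xfrm_next_index(struct net *net, int dir)
	{
		u32 idx = net->xfrm.idx_generator | dir;	/* low bits carry the direction */

		net->xfrm.idx_generator += 8;
		return idx;
	}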
diff --git a/rust/Makefile b/rust/Makefile
index 87958e864be0..7dbf9abe0d01 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -93,15 +93,14 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
# and then retouch the generated files.
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
rustdoc-alloc rustdoc-kernel
- $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)
- $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)
+ $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/
+ $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/
$(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
- -e 's:rust-logo\.svg:logo.svg:g' \
- -e 's:rust-logo\.png:logo.svg:g' \
- -e 's:favicon\.svg:logo.svg:g' \
- -e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
- $(Q)echo '.logo-container > img { object-fit: contain; }' \
- >> $(rustdoc_output)/rustdoc.css
+ -e 's:rust-logo-[0-9a-f]+\.svg:logo.svg:g' \
+ -e 's:favicon-[0-9a-f]+\.svg:logo.svg:g' \
+ -e 's:<link rel="alternate icon" type="image/png" href="[/.]+/static\.files/favicon-(16x16|32x32)-[0-9a-f]+\.png">::g'
+ $(Q)for f in $(rustdoc_output)/static.files/rustdoc-*.css; do \
+ echo ".logo-container > img { object-fit: contain; }" >> $$f; done
rustdoc-macros: private rustdoc_host = yes
rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
@@ -290,6 +289,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
+ -fstrict-flex-arrays=% \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 05fcab6abfe6..032b64543953 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -37,7 +37,7 @@ pub mod code {
declare_err!(E2BIG, "Argument list too long.");
declare_err!(ENOEXEC, "Exec format error.");
declare_err!(EBADF, "Bad file number.");
- declare_err!(ECHILD, "Exec format error.");
+ declare_err!(ECHILD, "No child processes.");
declare_err!(EAGAIN, "Try again.");
declare_err!(ENOMEM, "Out of memory.");
declare_err!(EACCES, "Permission denied.");
@@ -133,7 +133,7 @@ impl Error {
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
- // SAFETY: self.0 is a valid error due to its invariant.
+ // SAFETY: `self.0` is a valid error due to its invariant.
unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
}
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
index dc39d938ea77..188412aa2757 100644
--- a/scripts/const_structs.checkpatch
+++ b/scripts/const_structs.checkpatch
@@ -94,3 +94,4 @@ vm_operations_struct
wacom_features
watchdog_ops
wd_ops
+xattr_handler
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index bd6a910f6528..53a0070ff5df 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -226,7 +226,7 @@ static int __aafs_setup_d_inode(struct inode *dir, struct dentry *dentry,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = iops ? iops : &simple_dir_inode_operations;
@@ -1557,7 +1557,8 @@ void __aafs_profile_migrate_dents(struct aa_profile *old,
if (new->dents[i]) {
struct inode *inode = d_inode(new->dents[i]);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode,
+ inode_set_ctime_current(inode));
}
old->dents[i] = NULL;
}
@@ -2543,7 +2544,7 @@ static int aa_mk_null_file(struct dentry *parent)
inode->i_ino = get_next_ino();
inode->i_mode = S_IFCHR | S_IRUGO | S_IWUGO;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO,
MKDEV(MEM_MAJOR, 3));
d_instantiate(dentry, inode);
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 8b8846073e14..913ec8d0eb63 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -89,10 +89,10 @@ void __aa_loaddata_update(struct aa_loaddata *data, long revision)
struct inode *inode;
inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
- inode->i_mtime = inode_set_ctime_current(inode);
+ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
}
diff --git a/security/inode.c b/security/inode.c
index 3aa75fffa8c9..9e7cde913667 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -145,7 +145,7 @@ static struct dentry *securityfs_create_dentry(const char *name, umode_t mode,
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
+ simple_inode_init_ts(inode);
inode->i_private = data;
if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 6fa640263216..6c596ae7fef9 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1198,7 +1198,7 @@ static struct inode *sel_make_inode(struct super_block *sb, umode_t mode)
if (ret) {
ret->i_mode = mode;
- ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret);
+ simple_inode_init_ts(ret);
}
return ret;
}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index bd9ddf412b46..9a69236fa207 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -3527,7 +3527,7 @@ static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
if (runtime->state == SNDRV_PCM_STATE_OPEN ||
runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
- if (!to->user_backed)
+ if (!user_backed_iter(to))
return -EINVAL;
if (to->nr_segs > 1024 || to->nr_segs != runtime->channels)
return -EINVAL;
@@ -3567,7 +3567,7 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
if (runtime->state == SNDRV_PCM_STATE_OPEN ||
runtime->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
- if (!from->user_backed)
+ if (!user_backed_iter(from))
return -EINVAL;
if (from->nr_segs > 128 || from->nr_segs != runtime->channels ||
!frame_aligned(runtime, iov->iov_len))
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3eeecf67c17b..9677c09cf7a9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7078,6 +7078,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec,
0x0); /* Make sure 0x14 was disable */
}
}
+/* Fix the missing (none) verb table of the Headset Mic pin */
+static void alc_fixup_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x19, 0x03a1103c },
+ { }
+ };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+ break;
+ }
+}
enum {
@@ -7344,6 +7362,7 @@ enum {
ALC245_FIXUP_HP_X360_MUTE_LEDS,
ALC287_FIXUP_THINKPAD_I2S_SPK,
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ ALC2XX_FIXUP_HEADSET_MIC,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9448,6 +9467,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
},
+ [ALC2XX_FIXUP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mic,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9722,6 +9745,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
@@ -9791,6 +9815,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
@@ -10750,6 +10775,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
{0x19, 0x40000000},
{0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
+ {0x19, 0x40000000}),
{}
};
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index f2e7c6d0be46..f9059780b7a7 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -706,7 +706,7 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
mutex_lock(&cs35l56->base.irq_lock);
- init_completion(&cs35l56->init_completion);
+ reinit_completion(&cs35l56->init_completion);
cs35l56->soft_resetting = true;
cs35l56_system_reset(&cs35l56->base, !!cs35l56->sdw_peripheral);
@@ -1186,6 +1186,12 @@ post_soft_reset:
/* Registers could be dirty after soft reset or SoundWire enumeration */
regcache_sync(cs35l56->base.regmap);
+ /* Set ASP1 DOUT to high-impedance when it is not transmitting audio data. */
+ ret = regmap_set_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL3,
+ CS35L56_ASP1_DOUT_HIZ_CTRL_MASK);
+ if (ret)
+ return dev_err_probe(cs35l56->base.dev, ret, "Failed to write ASP1_CONTROL3\n");
+
cs35l56->base.init_done = true;
complete(&cs35l56->init_completion);
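The cs35l56 fix swaps init_completion() for reinit_completion(): on an already-live completion, init_completion() re-initializes the internal waitqueue and can corrupt it under existing waiters, while reinit_completion() only resets the done count. The general pattern:

	struct completion c;

	init_completion(&c);	/* once, when the object is created */
	/* ... later, before each re-armed wait cycle: */
	reinit_completion(&c);
	wait_for_completion_timeout(&c, msecs_to_jiffies(100));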
diff --git a/sound/soc/codecs/cs42l42-sdw.c b/sound/soc/codecs/cs42l42-sdw.c
index 974bae4abfad..94a66a325303 100644
--- a/sound/soc/codecs/cs42l42-sdw.c
+++ b/sound/soc/codecs/cs42l42-sdw.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 92e37bc1df9d..9f5f1a92561d 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -34,7 +34,7 @@ static const unsigned int cs42l43_accdet_db_ms[] = {
static const unsigned int cs42l43_accdet_ramp_ms[] = { 10, 40, 90, 170 };
static const unsigned int cs42l43_accdet_bias_sense[] = {
- 14, 23, 41, 50, 60, 68, 86, 95, 0,
+ 14, 24, 43, 52, 61, 71, 90, 99, 0,
};
static int cs42l43_find_index(struct cs42l43_codec *priv, const char * const prop,
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 581b334a6631..3bbe85091649 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -59,9 +59,6 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
bool micbias_up = false;
int retries = 0;
- /* Disable ground switch */
- snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
/* Drive headphones/lineout */
snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK,
@@ -155,9 +152,6 @@ static void da7219_aad_hptest_work(struct work_struct *work)
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
}
- /* Disable ground switch */
- snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
/* Ensure gain ramping at fastest rate */
gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
@@ -421,6 +415,11 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
* handle a removal, and we can check at the end of
* hptest if we have a valid result or not.
*/
+
+ cancel_delayed_work_sync(&da7219_aad->jack_det_work);
+ /* Disable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
if (statusa & DA7219_JACK_TYPE_STS_MASK) {
report |= SND_JACK_HEADSET;
mask |= SND_JACK_HEADSET | SND_JACK_LINEOUT;
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index ec6859ec0d38..fff4a8b862a7 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -1675,12 +1675,12 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
u16 boost_path_ctl, boost_path_cfg1;
u16 reg, reg_mix;
- if (!strcmp(w->name, "WSA_RX INT0 CHAIN")) {
+ if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT0 CHAIN")) {
boost_path_ctl = CDC_WSA_BOOST0_BOOST_PATH_CTL;
boost_path_cfg1 = CDC_WSA_RX0_RX_PATH_CFG1;
reg = CDC_WSA_RX0_RX_PATH_CTL;
reg_mix = CDC_WSA_RX0_RX_PATH_MIX_CTL;
- } else if (!strcmp(w->name, "WSA_RX INT1 CHAIN")) {
+ } else if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT1 CHAIN")) {
boost_path_ctl = CDC_WSA_BOOST1_BOOST_PATH_CTL;
boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
reg = CDC_WSA_RX1_RX_PATH_CTL;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 1a137ca3f496..7938b52d741d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3257,6 +3257,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
+ regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
+ RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
}
rt5645_irq(0, rt5645);
diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
index 86bd6c18a944..41076be23854 100644
--- a/sound/soc/codecs/tas2780.c
+++ b/sound/soc/codecs/tas2780.c
@@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
usleep_range(2000, 2050);
}
- snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+ ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
TAS2780_RST);
if (ret)
dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
index 6951120057e5..a1f04010da95 100644
--- a/sound/soc/codecs/wcd938x-sdw.c
+++ b/sound/soc/codecs/wcd938x-sdw.c
@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev,
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- return component_add(dev, &wcd938x_sdw_component_ops);
+ ret = component_add(dev, &wcd938x_sdw_component_ops);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return ret;
+}
+
+static int wcd9380_remove(struct sdw_slave *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ component_del(dev, &wcd938x_sdw_component_ops);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return 0;
}
static const struct sdw_device_id wcd9380_slave_id[] = {
@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
static struct sdw_driver wcd9380_codec_driver = {
.probe = wcd9380_probe,
+ .remove = wcd9380_remove,
.ops = &wcd9380_slave_ops,
.id_table = wcd9380_slave_id,
.driver = {
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index a3c680661377..d27b919c63b4 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -3325,8 +3325,10 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
return dev_err_probe(dev, ret, "Failed to get supplies\n");
ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
- if (ret)
+ if (ret) {
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
return dev_err_probe(dev, ret, "Failed to enable supplies\n");
+ }
wcd938x_dt_parse_micbias_info(dev, wcd938x);
@@ -3435,7 +3437,8 @@ static int wcd938x_bind(struct device *dev)
wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
if (!wcd938x->rxdev) {
dev_err(dev, "could not find slave with matching of node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unbind;
}
wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
@@ -3443,46 +3446,47 @@ static int wcd938x_bind(struct device *dev)
wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
if (!wcd938x->txdev) {
dev_err(dev, "could not find txslave with matching of node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_rxdev;
}
wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
- if (!wcd938x->tx_sdw_dev) {
- dev_err(dev, "could not get txslave with matching of dev\n");
- return -EINVAL;
- }
/* As TX is the main CSR reg interface, it should not be suspended first;
 * explicitly add the dependency link */
if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink tx and rx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_txdev;
}
if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink wcd and tx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_rxtx_link;
}
if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink wcd and rx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_tx_link;
}
wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
if (!wcd938x->regmap) {
dev_err(dev, "could not get TX device regmap\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_rx_link;
}
ret = wcd938x_irq_init(wcd938x, dev);
if (ret) {
dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
- return ret;
+ goto err_remove_rx_link;
}
wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
@@ -3491,27 +3495,45 @@ static int wcd938x_bind(struct device *dev)
ret = wcd938x_set_micbias_data(wcd938x);
if (ret < 0) {
dev_err(dev, "%s: bad micbias pdata\n", __func__);
- return ret;
+ goto err_remove_rx_link;
}
ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
- if (ret)
+ if (ret) {
dev_err(dev, "%s: Codec registration failed\n",
__func__);
+ goto err_remove_rx_link;
+ }
- return ret;
+ return 0;
+err_remove_rx_link:
+ device_link_remove(dev, wcd938x->rxdev);
+err_remove_tx_link:
+ device_link_remove(dev, wcd938x->txdev);
+err_remove_rxtx_link:
+ device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+err_put_txdev:
+ put_device(wcd938x->txdev);
+err_put_rxdev:
+ put_device(wcd938x->rxdev);
+err_unbind:
+ component_unbind_all(dev, wcd938x);
+
+ return ret;
}
static void wcd938x_unbind(struct device *dev)
{
struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ snd_soc_unregister_component(dev);
device_link_remove(dev, wcd938x->txdev);
device_link_remove(dev, wcd938x->rxdev);
device_link_remove(wcd938x->rxdev, wcd938x->txdev);
- snd_soc_unregister_component(dev);
+ put_device(wcd938x->txdev);
+ put_device(wcd938x->rxdev);
component_unbind_all(dev, wcd938x);
}
@@ -3572,13 +3594,13 @@ static int wcd938x_probe(struct platform_device *pdev)
ret = wcd938x_add_slave_components(wcd938x, dev, &match);
if (ret)
- return ret;
+ goto err_disable_regulators;
wcd938x_reset(wcd938x);
ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
if (ret)
- return ret;
+ goto err_disable_regulators;
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
@@ -3588,11 +3610,27 @@ static int wcd938x_probe(struct platform_device *pdev)
pm_runtime_idle(dev);
return 0;
+
+err_disable_regulators:
+ regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+
+ return ret;
}
static void wcd938x_remove(struct platform_device *pdev)
{
- component_master_del(&pdev->dev, &wcd938x_comp_ops);
+ struct device *dev = &pdev->dev;
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+
+ component_master_del(dev, &wcd938x_comp_ops);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
}
#if defined(CONFIG_OF)
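The wcd938x_bind()/probe() changes above adopt the kernel's standard goto-ladder unwind: resources are released in the reverse of acquisition order, and each failure jumps to the label that undoes exactly what has succeeded so far. A minimal standalone sketch of the idiom (hypothetical acquire/release helpers, not the driver's actual calls):

	/* Sketch only: acquire rxdev -> txdev -> links, unwind in reverse. */
	#include <stdio.h>

	static int acquire(const char *what) { printf("get %s\n", what); return 0; }
	static void release(const char *what) { printf("put %s\n", what); }

	static int bind_sketch(void)
	{
		int ret;

		ret = acquire("rxdev");
		if (ret)
			return ret;		/* nothing to unwind yet */

		ret = acquire("txdev");
		if (ret)
			goto err_put_rxdev;

		ret = acquire("links");
		if (ret)
			goto err_put_txdev;

		return 0;

	err_put_txdev:
		release("txdev");
	err_put_rxdev:
		release("rxdev");
		return ret;
	}

	int main(void) { return bind_sketch(); }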
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 22c004179214..9ea4be56d3b7 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -917,7 +917,7 @@ static int jh7110_i2stx0_clk_cfg(struct i2s_clk_config_data *config)
static int dw_i2s_probe(struct platform_device *pdev)
{
- const struct i2s_platform_data *pdata = of_device_get_match_data(&pdev->dev);
+ const struct i2s_platform_data *pdata = pdev->dev.platform_data;
struct dw_i2s_dev *dev;
struct resource *res;
int ret, irq;
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index b70034c07eee..b8a3cb8b7597 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -773,7 +773,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
if (IS_ERR(priv->extclk)) {
ret = PTR_ERR(priv->extclk);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto err_priv;
priv->extclk = NULL;
}
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index ba7c0ae82e00..566033f7dd2e 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -242,6 +242,7 @@ int snd_soc_component_notify_control(struct snd_soc_component *component,
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
struct snd_kcontrol *kctl;
+ /* When updating, also update snd_soc_dapm_widget_name_cmp() */
if (component->name_prefix)
snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl);
else
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f07e83678373..312e55579831 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2728,6 +2728,18 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
+ const char *wname = widget->name;
+
+ if (component->name_prefix)
+ wname += strlen(component->name_prefix) + 1; /* plus space */
+
+ return strcmp(wname, s);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_widget_name_cmp);
+
/*
* dapm_update_widget_flags() - Re-compute widget sink and source flags
* @w: The widget for which to update the flags
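snd_soc_dapm_widget_name_cmp() spares callers from open-coding the name-prefix skip that the soc-component.c comment above points at. A hedged usage fragment (kernel context, so not standalone; the widget name "ADC" is hypothetical):

	/* Sketch: match a widget by its unprefixed name. */
	struct snd_soc_dapm_widget *w;

	for_each_card_widgets(card, w) {
		if (!snd_soc_dapm_widget_name_cmp(w, "ADC"))
			return w;	/* hits both "ADC" and "<prefix> ADC" */
	}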
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
index 4798f9d18fe8..9de35df1afc3 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_32.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
@@ -26,6 +26,6 @@
#ifndef __NR_setns
#define __NR_setns 346
#endif
-#ifdef __NR_seccomp
+#ifndef __NR_seccomp
#define __NR_seccomp 354
#endif
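The one-character s/ifdef/ifndef/ above is the whole bug: with #ifdef, the fallback only defined __NR_seccomp when it was already defined, i.e. never. The intended guarded-fallback idiom is:

	/* Define a fallback only when the system headers lack it. */
	#ifndef __NR_seccomp
	#define __NR_seccomp 354	/* x86 32-bit syscall number */
	#endif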
diff --git a/tools/build/feature/test-llvm.cpp b/tools/build/feature/test-llvm.cpp
new file mode 100644
index 000000000000..88a3d1bdd9f6
--- /dev/null
+++ b/tools/build/feature/test-llvm.cpp
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
+
+#if NUM_VERSION < 0x030900
+# error "LLVM version too low"
+#endif
+int main()
+{
+ llvm::errs() << "Hello World!\n";
+ llvm::llvm_shutdown();
+ return 0;
+}
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
new file mode 100644
index 000000000000..83971b3cbfce
--- /dev/null
+++ b/tools/include/linux/rwsem.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+ pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS__RWSEM_H */
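The new shim maps the kernel's rw_semaphore API onto a pthread rwlock so shared kernel sources can build under tools/. A minimal standalone usage sketch (link with -lpthread):

	#include <linux/rwsem.h>	/* the tools/include shim above */

	int main(void)
	{
		struct rw_semaphore sem;

		init_rwsem(&sem);	/* pthread_rwlock_init() underneath */
		down_read(&sem);	/* shared: concurrent readers allowed */
		up_read(&sem);
		down_write(&sem);	/* exclusive */
		up_write(&sem);
		return exit_rwsem(&sem);
	}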
diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
index 64415b9fac77..28c26a00a762 100644
--- a/tools/include/nolibc/arch-i386.h
+++ b/tools/include/nolibc/arch-i386.h
@@ -167,7 +167,9 @@ void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_
__asm__ volatile (
"xor %ebp, %ebp\n" /* zero the stack frame */
"mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
- "and $-16, %esp\n" /* last pushed argument must be 16-byte aligned */
+ "add $12, %esp\n" /* avoid over-estimating after the 'and' & 'sub' below */
+ "and $-16, %esp\n" /* the %esp must be 16-byte aligned on 'call' */
+ "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */
"push %eax\n" /* push arg1 on stack to support plain stack modes too */
"call _start_c\n" /* transfer to c runtime */
"hlt\n" /* ensure it does not return */
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index a5f33fef1672..a05655b4ce1d 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -13,6 +13,7 @@ const unsigned long *_auxv __attribute__((weak));
static void __stack_chk_init(void);
static void exit(int);
+__attribute__((weak))
void _start_c(long *sp)
{
long argc;
diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps
index f842bc66b967..64d139400db1 100644
--- a/tools/net/ynl/Makefile.deps
+++ b/tools/net/ynl/Makefile.deps
@@ -18,3 +18,4 @@ CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h)
CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h)
CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h)
CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h)
+CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_H,nfsd.h)
diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile
index f8817d2e56e4..c1935b01902e 100644
--- a/tools/net/ynl/generated/Makefile
+++ b/tools/net/ynl/generated/Makefile
@@ -14,7 +14,7 @@ YNL_GEN_ARG_ethtool:=--user-header linux/ethtool_netlink.h \
TOOL:=../ynl-gen-c.py
-GENS:=ethtool devlink handshake fou netdev
+GENS:=ethtool devlink handshake fou netdev nfsd
SRCS=$(patsubst %,%-user.c,${GENS})
HDRS=$(patsubst %,%-user.h,${GENS})
OBJS=$(patsubst %,%-user.o,${GENS})
diff --git a/tools/net/ynl/generated/devlink-user.c b/tools/net/ynl/generated/devlink-user.c
index 3a8d8499fab6..2cb2518500cb 100644
--- a/tools/net/ynl/generated/devlink-user.c
+++ b/tools/net/ynl/generated/devlink-user.c
@@ -16,19 +16,19 @@
static const char * const devlink_op_strmap[] = {
[3] = "get",
[7] = "port-get",
- [DEVLINK_CMD_SB_GET] = "sb-get",
- [DEVLINK_CMD_SB_POOL_GET] = "sb-pool-get",
- [DEVLINK_CMD_SB_PORT_POOL_GET] = "sb-port-pool-get",
- [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = "sb-tc-pool-bind-get",
+ [13] = "sb-get",
+ [17] = "sb-pool-get",
+ [21] = "sb-port-pool-get",
+ [25] = "sb-tc-pool-bind-get",
[DEVLINK_CMD_PARAM_GET] = "param-get",
[DEVLINK_CMD_REGION_GET] = "region-get",
[DEVLINK_CMD_INFO_GET] = "info-get",
[DEVLINK_CMD_HEALTH_REPORTER_GET] = "health-reporter-get",
- [DEVLINK_CMD_TRAP_GET] = "trap-get",
- [DEVLINK_CMD_TRAP_GROUP_GET] = "trap-group-get",
- [DEVLINK_CMD_TRAP_POLICER_GET] = "trap-policer-get",
- [DEVLINK_CMD_RATE_GET] = "rate-get",
- [DEVLINK_CMD_LINECARD_GET] = "linecard-get",
+ [63] = "trap-get",
+ [67] = "trap-group-get",
+ [71] = "trap-policer-get",
+ [76] = "rate-get",
+ [80] = "linecard-get",
[DEVLINK_CMD_SELFTESTS_GET] = "selftests-get",
};
@@ -838,7 +838,7 @@ devlink_sb_get(struct ynl_sock *ys, struct devlink_sb_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_GET;
+ yrs.rsp_cmd = 13;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -876,7 +876,7 @@ devlink_sb_get_dump(struct ynl_sock *ys, struct devlink_sb_get_req_dump *req)
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_get_list);
yds.cb = devlink_sb_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_GET;
+ yds.rsp_cmd = 13;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1);
@@ -987,7 +987,7 @@ devlink_sb_pool_get(struct ynl_sock *ys, struct devlink_sb_pool_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_pool_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+ yrs.rsp_cmd = 17;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -1026,7 +1026,7 @@ devlink_sb_pool_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_pool_get_list);
yds.cb = devlink_sb_pool_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+ yds.rsp_cmd = 17;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1);
@@ -1147,7 +1147,7 @@ devlink_sb_port_pool_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_port_pool_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+ yrs.rsp_cmd = 21;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -1187,7 +1187,7 @@ devlink_sb_port_pool_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_port_pool_get_list);
yds.cb = devlink_sb_port_pool_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+ yds.rsp_cmd = 21;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1);
@@ -1316,7 +1316,7 @@ devlink_sb_tc_pool_bind_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+ yrs.rsp_cmd = 25;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -1356,7 +1356,7 @@ devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_sb_tc_pool_bind_get_list);
yds.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+ yds.rsp_cmd = 25;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1);
@@ -2183,7 +2183,7 @@ devlink_trap_get(struct ynl_sock *ys, struct devlink_trap_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_trap_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+ yrs.rsp_cmd = 63;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2223,7 +2223,7 @@ devlink_trap_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_trap_get_list);
yds.cb = devlink_trap_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+ yds.rsp_cmd = 63;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1);
@@ -2336,7 +2336,7 @@ devlink_trap_group_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_trap_group_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+ yrs.rsp_cmd = 67;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2376,7 +2376,7 @@ devlink_trap_group_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_trap_group_get_list);
yds.cb = devlink_trap_group_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+ yds.rsp_cmd = 67;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1);
@@ -2483,7 +2483,7 @@ devlink_trap_policer_get(struct ynl_sock *ys,
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_trap_policer_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+ yrs.rsp_cmd = 71;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2523,7 +2523,7 @@ devlink_trap_policer_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_trap_policer_get_list);
yds.cb = devlink_trap_policer_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+ yds.rsp_cmd = 71;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1);
@@ -2642,7 +2642,7 @@ devlink_rate_get(struct ynl_sock *ys, struct devlink_rate_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_rate_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_RATE_GET;
+ yrs.rsp_cmd = 76;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2682,7 +2682,7 @@ devlink_rate_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_rate_get_list);
yds.cb = devlink_rate_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_RATE_GET;
+ yds.rsp_cmd = 76;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_RATE_GET, 1);
@@ -2786,7 +2786,7 @@ devlink_linecard_get(struct ynl_sock *ys, struct devlink_linecard_get_req *req)
rsp = calloc(1, sizeof(*rsp));
yrs.yarg.data = rsp;
yrs.cb = devlink_linecard_get_rsp_parse;
- yrs.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+ yrs.rsp_cmd = 80;
err = ynl_exec(ys, nlh, &yrs);
if (err < 0)
@@ -2825,7 +2825,7 @@ devlink_linecard_get_dump(struct ynl_sock *ys,
yds.ys = ys;
yds.alloc_sz = sizeof(struct devlink_linecard_get_list);
yds.cb = devlink_linecard_get_rsp_parse;
- yds.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+ yds.rsp_cmd = 80;
yds.rsp_policy = &devlink_nest;
nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1);
diff --git a/tools/net/ynl/generated/nfsd-user.c b/tools/net/ynl/generated/nfsd-user.c
new file mode 100644
index 000000000000..fec6828680ce
--- /dev/null
+++ b/tools/net/ynl/generated/nfsd-user.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN user source */
+
+#include <stdlib.h>
+#include <string.h>
+#include "nfsd-user.h"
+#include "ynl.h"
+#include <linux/nfsd_netlink.h>
+
+#include <libmnl/libmnl.h>
+#include <linux/genetlink.h>
+
+/* Enums */
+static const char * const nfsd_op_strmap[] = {
+ [NFSD_CMD_RPC_STATUS_GET] = "rpc-status-get",
+};
+
+const char *nfsd_op_str(int op)
+{
+ if (op < 0 || op >= (int)MNL_ARRAY_SIZE(nfsd_op_strmap))
+ return NULL;
+ return nfsd_op_strmap[op];
+}
+
+/* Policies */
+struct ynl_policy_attr nfsd_rpc_status_policy[NFSD_A_RPC_STATUS_MAX + 1] = {
+ [NFSD_A_RPC_STATUS_XID] = { .name = "xid", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_FLAGS] = { .name = "flags", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_PROG] = { .name = "prog", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_VERSION] = { .name = "version", .type = YNL_PT_U8, },
+ [NFSD_A_RPC_STATUS_PROC] = { .name = "proc", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_SERVICE_TIME] = { .name = "service_time", .type = YNL_PT_U64, },
+ [NFSD_A_RPC_STATUS_PAD] = { .name = "pad", .type = YNL_PT_IGNORE, },
+ [NFSD_A_RPC_STATUS_SADDR4] = { .name = "saddr4", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_DADDR4] = { .name = "daddr4", .type = YNL_PT_U32, },
+ [NFSD_A_RPC_STATUS_SADDR6] = { .name = "saddr6", .type = YNL_PT_BINARY,},
+ [NFSD_A_RPC_STATUS_DADDR6] = { .name = "daddr6", .type = YNL_PT_BINARY,},
+ [NFSD_A_RPC_STATUS_SPORT] = { .name = "sport", .type = YNL_PT_U16, },
+ [NFSD_A_RPC_STATUS_DPORT] = { .name = "dport", .type = YNL_PT_U16, },
+ [NFSD_A_RPC_STATUS_COMPOUND_OPS] = { .name = "compound-ops", .type = YNL_PT_U32, },
+};
+
+struct ynl_policy_nest nfsd_rpc_status_nest = {
+ .max_attr = NFSD_A_RPC_STATUS_MAX,
+ .table = nfsd_rpc_status_policy,
+};
+
+/* Common nested types */
+/* ============== NFSD_CMD_RPC_STATUS_GET ============== */
+/* NFSD_CMD_RPC_STATUS_GET - dump */
+void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp)
+{
+ struct nfsd_rpc_status_get_list *next = rsp;
+
+ while ((void *)next != YNL_LIST_END) {
+ rsp = next;
+ next = rsp->next;
+
+ free(rsp->obj.saddr6);
+ free(rsp->obj.daddr6);
+ free(rsp->obj.compound_ops);
+ free(rsp);
+ }
+}
+
+struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys)
+{
+ struct ynl_dump_state yds = {};
+ struct nlmsghdr *nlh;
+ int err;
+
+ yds.ys = ys;
+ yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_list);
+ yds.cb = nfsd_rpc_status_get_rsp_parse;
+ yds.rsp_cmd = NFSD_CMD_RPC_STATUS_GET;
+ yds.rsp_policy = &nfsd_rpc_status_nest;
+
+ nlh = ynl_gemsg_start_dump(ys, ys->family_id, NFSD_CMD_RPC_STATUS_GET, 1);
+
+ err = ynl_exec_dump(ys, nlh, &yds);
+ if (err < 0)
+ goto free_list;
+
+ return yds.first;
+
+free_list:
+ nfsd_rpc_status_get_list_free(yds.first);
+ return NULL;
+}
+
+const struct ynl_family ynl_nfsd_family = {
+ .name = "nfsd",
+};
diff --git a/tools/net/ynl/generated/nfsd-user.h b/tools/net/ynl/generated/nfsd-user.h
new file mode 100644
index 000000000000..b6b69501031a
--- /dev/null
+++ b/tools/net/ynl/generated/nfsd-user.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/nfsd.yaml */
+/* YNL-GEN user header */
+
+#ifndef _LINUX_NFSD_GEN_H
+#define _LINUX_NFSD_GEN_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <linux/types.h>
+#include <linux/nfsd_netlink.h>
+
+struct ynl_sock;
+
+extern const struct ynl_family ynl_nfsd_family;
+
+/* Enums */
+const char *nfsd_op_str(int op);
+
+/* Common nested types */
+/* ============== NFSD_CMD_RPC_STATUS_GET ============== */
+/* NFSD_CMD_RPC_STATUS_GET - dump */
+struct nfsd_rpc_status_get_list {
+ struct nfsd_rpc_status_get_list *next;
+ struct nfsd_rpc_status_get_rsp obj __attribute__ ((aligned (8)));
+};
+
+void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp);
+
+struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys);
+
+#endif /* _LINUX_NFSD_GEN_H */
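A hedged consumer sketch for the generated bindings (the xid/prog field names are assumed from the attribute table above; ynl_sock_create()/ynl_sock_destroy() come from the ynl C library):

	#include <stdio.h>
	#include "nfsd-user.h"
	#include "ynl.h"

	int dump_rpc_status(void)
	{
		struct nfsd_rpc_status_get_list *rsp, *i;
		struct ynl_sock *ys;

		ys = ynl_sock_create(&ynl_nfsd_family, NULL);
		if (!ys)
			return -1;

		rsp = nfsd_rpc_status_get_dump(ys);	/* NULL on error */
		for (i = rsp; i && (void *)i != YNL_LIST_END; i = i->next)
			printf("xid %u prog %u\n", i->obj.xid, i->obj.prog);

		if (rsp)
			nfsd_rpc_status_get_list_free(rsp);
		ynl_sock_destroy(ys);
		return 0;
	}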
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index e45c7cb1d5bc..e92f67383dde 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -14,6 +14,8 @@ NORETURN(__stack_chk_fail)
NORETURN(__ubsan_handle_builtin_unreachable)
NORETURN(arch_call_rest_init)
NORETURN(arch_cpu_idle_dead)
+NORETURN(bch2_trans_in_restart_error)
+NORETURN(bch2_trans_restart_error)
NORETURN(cpu_bringup_and_idle)
NORETURN(cpu_startup_entry)
NORETURN(do_exit)
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v0.c b/tools/perf/dlfilters/dlfilter-test-api-v0.c
index 72f263d49121..4083b1abeaab 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v0.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v0.c
@@ -289,6 +289,15 @@ static int check_attr(void *ctx)
return 0;
}
+static int check_object_code(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ __u8 buf[15];
+
+ CHECK(perf_dlfilter_fns.object_code(ctx, sample->ip, buf, sizeof(buf)) > 0);
+
+ return 0;
+}
+
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
@@ -314,7 +323,8 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample) ||
+ check_object_code(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v2.c b/tools/perf/dlfilters/dlfilter-test-api-v2.c
index 38e593d92920..32ff619e881c 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v2.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v2.c
@@ -308,6 +308,15 @@ static int check_attr(void *ctx)
return 0;
}
+static int check_object_code(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ __u8 buf[15];
+
+ CHECK(perf_dlfilter_fns.object_code(ctx, sample->ip, buf, sizeof(buf)) > 0);
+
+ return 0;
+}
+
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
@@ -333,7 +342,8 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample) ||
+ check_object_code(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 1dbf27822ee2..4a1dc21b0450 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -282,13 +282,21 @@ static struct perf_event_attr *dlfilter__attr(void *ctx)
return &d->evsel->core.attr;
}
+static __s32 code_read(__u64 ip, struct map *map, struct machine *machine, void *buf, __u32 len)
+{
+ u64 offset = map__map_ip(map, ip);
+
+ if (ip + len >= map__end(map))
+ len = map__end(map) - ip;
+
+ return dso__data_read_offset(map__dso(map), machine, offset, buf, len);
+}
+
static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct addr_location *al;
struct addr_location a;
- struct map *map;
- u64 offset;
__s32 ret;
if (!d->ctx_valid)
@@ -298,27 +306,17 @@ static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
if (!al)
return -1;
- map = al->map;
-
- if (map && ip >= map__start(map) && ip < map__end(map) &&
+ if (al->map && ip >= map__start(al->map) && ip < map__end(al->map) &&
machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
- goto have_map;
+ return code_read(ip, al->map, d->machine, buf, len);
addr_location__init(&a);
+
thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
- if (!a.map) {
- ret = -1;
- goto out;
- }
+ ret = a.map ? code_read(ip, a.map, d->machine, buf, len) : -1;
- map = a.map;
-have_map:
- offset = map__map_ip(map, ip);
- if (ip + len >= map__end(map))
- len = map__end(map) - ip;
- ret = dso__data_read_offset(map__dso(map), d->machine, offset, buf, len);
-out:
addr_location__exit(&a);
+
return ret;
}
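The refactor folds the length clamp into code_read(): a read starting at ip must stop at map__end(). The invariant in isolation (standalone toy with hypothetical addresses):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t clamp_len(uint64_t ip, uint64_t map_end, uint32_t len)
	{
		if (ip + len >= map_end)
			len = map_end - ip;
		return len;
	}

	int main(void)
	{
		assert(clamp_len(0x1000, 0x2000, 64) == 64);	/* fits */
		assert(clamp_len(0x1ff0, 0x2000, 64) == 0x10);	/* clamped */
		return 0;
	}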
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 8de6f39abd1b..d515ba8a0e16 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -295,7 +295,7 @@ static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *al
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.scale", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.scale", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -330,7 +330,7 @@ static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *ali
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.unit", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.unit", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -364,7 +364,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.per-pkg", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.per-pkg", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -385,7 +385,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.snapshot", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.snapshot", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
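All four pmu.c hunks fix the same omission: event alias files live under the PMU's events/ subdirectory in sysfs. The corrected path shape, as a standalone sketch (PMU and event names hypothetical):

	#include <stdio.h>

	int main(void)
	{
		char path[256];

		/* e.g. /sys/bus/event_source/devices/cpu/events/cache-misses.scale */
		snprintf(path, sizeof(path),
			 "/sys/bus/event_source/devices/%s/events/%s.scale",
			 "cpu", "cache-misses");
		puts(path);
		return 0;
	}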
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
new file mode 100644
index 000000000000..bc9514428dba
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test failure of registering a kprobe on a non-unique symbol
+# requires: kprobe_events
+
+SYMBOL='name_show'
+
+# Skip this test on kernels where SYMBOL is unique or does not exist.
+if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
+ exit_unsupported
+fi
+
+! echo "p:test_non_unique ${SYMBOL}" > kprobe_events
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index 112bc1da732a..ce33d306c2cb 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * tools/testing/selftests/kvm/include/kvm_util.h
- *
* Copyright (C) 2018, Google LLC.
*/
#ifndef SELFTEST_KVM_UCALL_COMMON_H
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 4fd042112526..25bc61dac5fb 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -68,6 +68,12 @@ struct xstate {
#define XFEATURE_MASK_OPMASK BIT_ULL(5)
#define XFEATURE_MASK_ZMM_Hi256 BIT_ULL(6)
#define XFEATURE_MASK_Hi16_ZMM BIT_ULL(7)
+#define XFEATURE_MASK_PT BIT_ULL(8)
+#define XFEATURE_MASK_PKRU BIT_ULL(9)
+#define XFEATURE_MASK_PASID BIT_ULL(10)
+#define XFEATURE_MASK_CET_USER BIT_ULL(11)
+#define XFEATURE_MASK_CET_KERNEL BIT_ULL(12)
+#define XFEATURE_MASK_LBR BIT_ULL(15)
#define XFEATURE_MASK_XTILE_CFG BIT_ULL(17)
#define XFEATURE_MASK_XTILE_DATA BIT_ULL(18)
@@ -147,6 +153,7 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_CLWB KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
#define X86_FEATURE_UMIP KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
#define X86_FEATURE_PKU KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
+#define X86_FEATURE_OSPKE KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
#define X86_FEATURE_LA57 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
#define X86_FEATURE_RDPID KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
#define X86_FEATURE_SGX_LC KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
@@ -553,6 +560,13 @@ static inline void xsetbv(u32 index, u64 value)
__asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
}
+static inline void wrpkru(u32 pkru)
+{
+ /* Note, ECX and EDX are architecturally required to be '0'. */
+ asm volatile(".byte 0x0f,0x01,0xef\n\t"
+ : : "a" (pkru), "c"(0), "d"(0));
+}
+
static inline struct desc_ptr get_gdt(void)
{
struct desc_ptr gdt;
@@ -908,6 +922,15 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
!kvm_cpu_has(feature.anti_feature);
}
+static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+{
+ if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
+ return 0;
+
+ return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
+ ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+}
+
static inline size_t kvm_cpuid2_size(int nr_entries)
{
return sizeof(struct kvm_cpuid2) +
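kvm_cpu_supported_xcr0() reassembles the 64-bit mask from the LO/HI CPUID properties; tests can then gate state-handling steps on individual xfeatures. A hedged fragment (kernel-selftest context, so not standalone):

	uint64_t supported_xcr0 = kvm_cpu_supported_xcr0();

	if (!(supported_xcr0 & XFEATURE_MASK_YMM))
		return;		/* AVX state not exposable to the guest */

	/* enable OSXSAVE, xsetbv(0, ...), then dirty YMM state */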
diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c
index c4a69d8aeb68..74627514c4d4 100644
--- a/tools/testing/selftests/kvm/lib/guest_sprintf.c
+++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c
@@ -200,6 +200,13 @@ repeat:
++fmt;
}
+ /*
+ * Play nice with %llu, %llx, etc. KVM selftests only support
+ * 64-bit builds, so just treat %ll* the same as %l*.
+ */
+ if (qualifier == 'l' && *fmt == 'l')
+ ++fmt;
+
/* default base */
base = 10;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/apic.c b/tools/testing/selftests/kvm/lib/x86_64/apic.c
index 7168e25c194e..89153a333e83 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/apic.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/apic.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tools/testing/selftests/kvm/lib/x86_64/processor.c
- *
* Copyright (C) 2021, Google LLC.
*/
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 20eb2e730800..8698d1ab60d0 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -1033,9 +1033,8 @@ static bool test_loop(const struct test_data *data,
struct test_result *rbestruntime)
{
uint64_t maxslots;
- struct test_result result;
+ struct test_result result = {};
- result.nloops = 0;
if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
&result.nloops,
&result.slot_runtime, &result.guest_runtime)) {
@@ -1089,7 +1088,7 @@ int main(int argc, char *argv[])
.seconds = 5,
.runs = 1,
};
- struct test_result rbestslottime;
+ struct test_result rbestslottime = {};
int tctr;
if (!check_memory_sizes())
@@ -1098,11 +1097,10 @@ int main(int argc, char *argv[])
if (!parse_args(argc, argv, &targs))
return -1;
- rbestslottime.slottimens = 0;
for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
const struct test_data *data = &tests[tctr];
unsigned int runctr;
- struct test_result rbestruntime;
+ struct test_result rbestruntime = {};
if (tctr > targs.tfirst)
pr_info("\n");
@@ -1110,7 +1108,6 @@ int main(int argc, char *argv[])
pr_info("Testing %s performance with %i runs, %d seconds each\n",
data->name, targs.runs, targs.seconds);
- rbestruntime.runtimens = 0;
for (runctr = 0; runctr < targs.runs; runctr++)
if (!test_loop(data, &targs,
&rbestslottime, &rbestruntime))
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
index e446d76d1c0c..6c1278562090 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * KVM_GET/SET_* tests
- *
* Copyright (C) 2022, Red Hat, Inc.
*
* Tests for Hyper-V extensions to SVM.
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 7f36c32fa760..18ac5c1952a3 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * tools/testing/selftests/kvm/nx_huge_page_test.c
- *
* Usage: to be run via nx_huge_page_test.sh, which does the necessary
* environment setup and teardown
*
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
index 0560149e66ed..7cbb409801ee 100755
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
@@ -4,7 +4,6 @@
# Wrapper script which performs setup and cleanup for nx_huge_pages_test.
# Makes use of root privileges to set up huge pages and KVM module parameters.
#
-# tools/testing/selftests/kvm/nx_huge_page_test.sh
# Copyright (C) 2022, Google LLC.
set -e
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 4c4925a8ab45..88b58aab7207 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -139,6 +139,83 @@ static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
static void __attribute__((__flatten__)) guest_code(void *arg)
{
GUEST_SYNC(1);
+
+ if (this_cpu_has(X86_FEATURE_XSAVE)) {
+ uint64_t supported_xcr0 = this_cpu_supported_xcr0();
+ uint8_t buffer[4096];
+
+ memset(buffer, 0xcc, sizeof(buffer));
+
+ set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+ GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
+
+ xsetbv(0, xgetbv(0) | supported_xcr0);
+
+ /*
+ * Modify state for all supported xfeatures to take them out of
+ * their "init" state, i.e. to make them show up in XSTATE_BV.
+ *
+ * Note off-by-default features, e.g. AMX, are out of scope for
+ * this particular testcase as they have a different ABI.
+ */
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_FP);
+ asm volatile ("fincstp");
+
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_SSE);
+ asm volatile ("vmovdqu %0, %%xmm0" :: "m" (buffer));
+
+ if (supported_xcr0 & XFEATURE_MASK_YMM)
+ asm volatile ("vmovdqu %0, %%ymm0" :: "m" (buffer));
+
+ if (supported_xcr0 & XFEATURE_MASK_AVX512) {
+ asm volatile ("kmovq %0, %%k1" :: "r" (-1ull));
+ asm volatile ("vmovupd %0, %%zmm0" :: "m" (buffer));
+ asm volatile ("vmovupd %0, %%zmm16" :: "m" (buffer));
+ }
+
+ if (this_cpu_has(X86_FEATURE_MPX)) {
+ uint64_t bounds[2] = { 10, 0xffffffffull };
+ uint64_t output[2] = { };
+
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS);
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR);
+
+ /*
+ * Don't bother trying to get BNDCSR into the INUSE
+ * state. MSR_IA32_BNDCFGS doesn't count as it isn't
+ * managed via XSAVE/XRSTOR, and BNDCFGU can only be
+ * modified by XRSTOR. Stuffing XSTATE_BV in the host
+ * is simpler than doing XRSTOR here in the guest.
+ *
+ * However, temporarily enable MPX in BNDCFGS so that
+ * BNDMOV actually loads BND1. If MPX isn't *fully*
+ * enabled, all MPX instructions are treated as NOPs.
+ *
+ * Hand encode "bndmov (%rax),%bnd1" as support for MPX
+ * mnemonics/registers has been removed from gcc and
+ * clang (and was never fully supported by clang).
+ */
+ wrmsr(MSR_IA32_BNDCFGS, BIT_ULL(0));
+ asm volatile (".byte 0x66,0x0f,0x1a,0x08" :: "a" (bounds));
+ /*
+ * Hand encode "bndmov %bnd1, (%rax)" to sanity check
+ * that BND1 actually got loaded.
+ */
+ asm volatile (".byte 0x66,0x0f,0x1b,0x08" :: "a" (output));
+ wrmsr(MSR_IA32_BNDCFGS, 0);
+
+ GUEST_ASSERT_EQ(bounds[0], output[0]);
+ GUEST_ASSERT_EQ(bounds[1], output[1]);
+ }
+ if (this_cpu_has(X86_FEATURE_PKU)) {
+ GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_PKRU);
+ set_cr4(get_cr4() | X86_CR4_PKE);
+ GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSPKE));
+
+ wrpkru(-1u);
+ }
+ }
+
GUEST_SYNC(2);
if (arg) {
@@ -153,10 +230,11 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
int main(int argc, char *argv[])
{
+ uint64_t *xstate_bv, saved_xstate_bv;
vm_vaddr_t nested_gva = 0;
-
+ struct kvm_cpuid2 empty_cpuid = {};
struct kvm_regs regs1, regs2;
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu, *vcpuN;
struct kvm_vm *vm;
struct kvm_x86_state *state;
struct ucall uc;
@@ -209,6 +287,34 @@ int main(int argc, char *argv[])
/* Restore state in a new VM. */
vcpu = vm_recreate_with_one_vcpu(vm);
vcpu_load_state(vcpu, state);
+
+ /*
+ * Restore XSAVE state in a dummy vCPU, first without doing
+ * KVM_SET_CPUID2, and then with an empty guest CPUID. Except
+ * for off-by-default xfeatures, e.g. AMX, KVM is supposed to
+ * allow KVM_SET_XSAVE regardless of guest CPUID. Manually
+ * load only XSAVE state, MSRs in particular have a much more
+ * convoluted ABI.
+ *
+ * Load two versions of XSAVE state: one with the actual guest
+ * XSAVE state, and one with all supported features forced "on"
+ * in xstate_bv, e.g. to ensure that KVM allows loading all
+ * supported features, even if something goes awry in saving
+ * the original snapshot.
+ */
+ xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512];
+ saved_xstate_bv = *xstate_bv;
+
+ vcpuN = __vm_vcpu_add(vm, vcpu->id + 1);
+ vcpu_xsave_set(vcpuN, state->xsave);
+ *xstate_bv = kvm_cpu_supported_xcr0();
+ vcpu_xsave_set(vcpuN, state->xsave);
+
+ vcpu_init_cpuid(vcpuN, &empty_cpuid);
+ vcpu_xsave_set(vcpuN, state->xsave);
+ *xstate_bv = saved_xstate_bv;
+ vcpu_xsave_set(vcpuN, state->xsave);
+
kvm_x86_state_cleanup(state);
memset(&regs2, 0, sizeof(regs2));
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
index 5b669818e39a..59c7304f805e 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * svm_vmcall_test
- *
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
- *
- * Xen shared_info / pvclock testing
*/
#include "test_util.h"
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 05898ad9f4d9..9ec9ab60b63e 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -1,10 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * svm_vmcall_test
- *
* Copyright © 2021 Amazon.com, Inc. or its affiliates.
- *
- * Xen shared_info / pvclock testing
*/
#include "test_util.h"
diff --git a/tools/testing/selftests/mm/mremap_dontunmap.c b/tools/testing/selftests/mm/mremap_dontunmap.c
index ca2359835e75..a06e73ec8568 100644
--- a/tools/testing/selftests/mm/mremap_dontunmap.c
+++ b/tools/testing/selftests/mm/mremap_dontunmap.c
@@ -7,6 +7,7 @@
*/
#define _GNU_SOURCE
#include <sys/mman.h>
+#include <linux/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 8b017070960d..4a2881d43989 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -34,6 +34,7 @@ TEST_PROGS += gro.sh
TEST_PROGS += gre_gso.sh
TEST_PROGS += cmsg_so_mark.sh
TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
+TEST_PROGS += netns-name.sh
TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index e7d2a530618a..66d0db7a2614 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -2437,6 +2437,9 @@ ipv4_mpath_list_test()
run_cmd "ip -n ns2 route add 203.0.113.0/24
nexthop via 172.16.201.2 nexthop via 172.16.202.2"
run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.fib_multipath_hash_policy=1"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.veth2.rp_filter=0"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=0"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.default.rp_filter=0"
set +e
local dmac=$(ip -n ns2 -j link show dev veth2 | jq -r '.[]["address"]')
@@ -2449,7 +2452,7 @@ ipv4_mpath_list_test()
# words, the FIB lookup tracepoint needs to be triggered for every
# packet.
local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
- run_cmd "perf stat -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+ run_cmd "perf stat -a -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
list_rcv_eval $tmp_file $diff
@@ -2494,7 +2497,7 @@ ipv6_mpath_list_test()
# words, the FIB lookup tracepoint needs to be triggered for every
# packet.
local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
- run_cmd "perf stat -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+ run_cmd "perf stat -a -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
list_rcv_eval $tmp_file $diff
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index ee1f89a872b3..dc895b7b94e1 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -1432,7 +1432,9 @@ chk_rst_nr()
count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
if [ -z "$count" ]; then
print_skip
- elif [ $count -lt $rst_tx ]; then
+ # accept more RSTs than expected, unless none are expected
+ elif { [ $rst_tx -ne 0 ] && [ $count -lt $rst_tx ]; } ||
+ { [ $rst_tx -eq 0 ] && [ $count -ne 0 ]; }; then
fail_test "got $count MP_RST[s] TX expected $rst_tx"
else
print_ok
@@ -1442,7 +1444,9 @@ chk_rst_nr()
count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
if [ -z "$count" ]; then
print_skip
- elif [ "$count" -lt "$rst_rx" ]; then
+ # accept more RSTs than expected, unless none are expected
+ elif { [ $rst_rx -ne 0 ] && [ $count -lt $rst_rx ]; } ||
+ { [ $rst_rx -eq 0 ] && [ $count -ne 0 ]; }; then
fail_test "got $count MP_RST[s] RX expected $rst_rx"
else
print_ok
@@ -2305,6 +2309,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_rm_tx_nr 1
chk_rm_nr 1 1
+ chk_rst_nr 0 0
fi
# multiple subflows, remove
@@ -2317,6 +2322,7 @@ remove_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
chk_rm_nr 2 2
+ chk_rst_nr 0 0
fi
# single address, remove
@@ -2329,6 +2335,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
+ chk_rst_nr 0 0
fi
# subflow and signal, remove
@@ -2342,6 +2349,7 @@ remove_tests()
chk_join_nr 2 2 2
chk_add_nr 1 1
chk_rm_nr 1 1
+ chk_rst_nr 0 0
fi
# subflows and signal, remove
@@ -2356,6 +2364,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 2 2
+ chk_rst_nr 0 0
fi
# addresses remove
@@ -2370,6 +2379,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert
+ chk_rst_nr 0 0
fi
# invalid addresses remove
@@ -2384,6 +2394,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
+ chk_rst_nr 0 0
fi
# subflows and signal, flush
@@ -2398,6 +2409,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 1 1
chk_rm_nr 1 3 invert simult
+ chk_rst_nr 0 0
fi
# subflows flush
@@ -2417,6 +2429,7 @@ remove_tests()
else
chk_rm_nr 3 3
fi
+ chk_rst_nr 0 0
fi
# addresses flush
@@ -2431,6 +2444,7 @@ remove_tests()
chk_join_nr 3 3 3
chk_add_nr 3 3
chk_rm_nr 3 3 invert simult
+ chk_rst_nr 0 0
fi
# invalid addresses flush
@@ -2445,6 +2459,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 3 3
chk_rm_nr 3 1 invert
+ chk_rst_nr 0 0
fi
# remove id 0 subflow
@@ -2456,6 +2471,7 @@ remove_tests()
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
chk_rm_nr 1 1
+ chk_rst_nr 0 0
fi
# remove id 0 address
@@ -2468,6 +2484,7 @@ remove_tests()
chk_join_nr 1 1 1
chk_add_nr 1 1
chk_rm_nr 1 1 invert
+ chk_rst_nr 0 0 invert
fi
}
diff --git a/tools/testing/selftests/net/netns-name.sh b/tools/testing/selftests/net/netns-name.sh
new file mode 100755
index 000000000000..7d3d3fc99461
--- /dev/null
+++ b/tools/testing/selftests/net/netns-name.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -o pipefail
+
+NS=netns-name-test
+DEV=dummy-dev0
+DEV2=dummy-dev1
+ALT_NAME=some-alt-name
+
+RET_CODE=0
+
+cleanup() {
+ ip netns del $NS
+}
+
+trap cleanup EXIT
+
+fail() {
+ echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
+ RET_CODE=1
+}
+
+ip netns add $NS
+
+#
+# Test basic move without a rename
+#
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 ||
+ fail "Can't perform a netns move"
+ip link show dev $DEV >> /dev/null || fail "Device not found after move"
+ip link del $DEV || fail
+
+#
+# Test move with a conflict
+#
+ip link add name $DEV type dummy
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 2> /dev/null &&
+ fail "Performed a netns move with a name conflict"
+ip link show dev $DEV >> /dev/null || fail "Device not found after move"
+ip -netns $NS link del $DEV || fail
+ip link del $DEV || fail
+
+#
+# Test move with a conflict and rename
+#
+ip link add name $DEV type dummy
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 name $DEV2 ||
+ fail "Can't perform a netns move with rename"
+ip link del $DEV2 || fail
+ip link del $DEV || fail
+
+#
+# Test dup alt-name with netns move
+#
+ip link add name $DEV type dummy || fail
+ip link property add dev $DEV altname $ALT_NAME || fail
+ip -netns $NS link add name $DEV2 type dummy || fail
+ip -netns $NS link property add dev $DEV2 altname $ALT_NAME || fail
+
+ip -netns $NS link set dev $DEV2 netns 1 2> /dev/null &&
+ fail "Moved with alt-name dup"
+
+ip link del $DEV || fail
+ip -netns $NS link del $DEV2 || fail
+
+#
+# Test creating alt-name in one net-ns and using in another
+#
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link property add dev $DEV altname $ALT_NAME || fail
+ip -netns $NS link set dev $DEV netns 1 || fail
+ip link show dev $ALT_NAME >> /dev/null || fail "Can't find alt-name after move"
+ip -netns $NS link show dev $ALT_NAME 2> /dev/null &&
+ fail "Can still find alt-name after move"
+ip link del $DEV || fail
+
+echo -ne "$(basename $0) \t\t\t\t"
+if [ $RET_CODE -eq 0 ]; then
+ echo "[ OK ]"
+else
+ echo "[ FAIL ]"
+fi
+exit $RET_CODE
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 9c2012d70b08..f8499d4c87f3 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -3,6 +3,8 @@
#
# OVS kernel module self tests
+trap ovs_exit_sig EXIT TERM INT ERR
+
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
@@ -142,6 +144,12 @@ ovs_add_flow () {
return 0
}
+ovs_del_flows () {
+ info "Deleting all flows from DP: sbx:$1 br:$2"
+ ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py del-flows "$2"
+ return 0
+}
+
ovs_drop_record_and_run () {
local sbx=$1
shift
@@ -198,6 +206,17 @@ test_drop_reason() {
ip netns exec server ip addr add 172.31.110.20/24 dev s1
ip netns exec server ip link set s1 up
+ # Check if drop reasons can be sent
+ ovs_add_flow "test_drop_reason" dropreason \
+ 'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(10)' 2>/dev/null
+ if [ $? == 1 ]; then
+ info "no support for drop reasons - skipping"
+ ovs_exit_sig
+ return $ksft_skip
+ fi
+
+ ovs_del_flows "test_drop_reason" dropreason
+
# Allow ARP
ovs_add_flow "test_drop_reason" dropreason \
'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1
@@ -525,7 +544,7 @@ run_test() {
fi
if python3 ovs-dpctl.py -h 2>&1 | \
- grep "Need to install the python" >/dev/null 2>&1; then
+ grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}"
return $ksft_skip
fi
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index 912dc8c49085..b97e621face9 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -28,8 +28,10 @@ try:
from pyroute2.netlink import nlmsg_atoms
from pyroute2.netlink.exceptions import NetlinkError
from pyroute2.netlink.generic import GenericNetlinkSocket
+ import pyroute2
+
except ModuleNotFoundError:
- print("Need to install the python pyroute2 package.")
+ print("Need to install the python pyroute2 package >= 0.6.")
sys.exit(0)
@@ -1117,12 +1119,14 @@ class ovskey(nla):
"src",
lambda x: str(ipaddress.IPv4Address(x)),
int,
+ convert_ipv4,
),
(
"dst",
"dst",
- lambda x: str(ipaddress.IPv6Address(x)),
+ lambda x: str(ipaddress.IPv4Address(x)),
int,
+ convert_ipv4,
),
("tp_src", "tp_src", "%d", int),
("tp_dst", "tp_dst", "%d", int),
@@ -1904,6 +1908,32 @@ class OvsFlow(GenericNetlinkSocket):
raise ne
return reply
+ def del_flows(self, dpifindex):
+ """
+ Send a del message to the kernel that will drop all flows.
+
+ dpifindex should be a valid datapath obtained by calling
+ into the OvsDatapath lookup
+ """
+
+ flowmsg = OvsFlow.ovs_flow_msg()
+ flowmsg["cmd"] = OVS_FLOW_CMD_DEL
+ flowmsg["version"] = OVS_DATAPATH_VERSION
+ flowmsg["reserved"] = 0
+ flowmsg["dpifindex"] = dpifindex
+
+ try:
+ reply = self.nlm_request(
+ flowmsg,
+ msg_type=self.prid,
+ msg_flags=NLM_F_REQUEST | NLM_F_ACK,
+ )
+ reply = reply[0]
+ except NetlinkError as ne:
+ print(flowmsg)
+ raise ne
+ return reply
+
def dump(self, dpifindex, flowspec=None):
"""
Returns a list of messages containing flows.
@@ -1998,6 +2028,12 @@ def main(argv):
nlmsg_atoms.ovskey = ovskey
nlmsg_atoms.ovsactions = ovsactions
+ # version check for pyroute2
+ prverscheck = pyroute2.__version__.split(".")
+ if int(prverscheck[0]) == 0 and int(prverscheck[1]) < 6:
+ print("Need to upgrade the python pyroute2 package to >= 0.6.")
+ sys.exit(0)
+
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
@@ -2060,6 +2096,9 @@ def main(argv):
addflcmd.add_argument("flow", help="Flow specification")
addflcmd.add_argument("acts", help="Flow actions")
+ delfscmd = subparsers.add_parser("del-flows")
+ delfscmd.add_argument("flsbr", help="Datapath name")
+
args = parser.parse_args()
if args.verbose > 0:
@@ -2143,6 +2182,11 @@ def main(argv):
flow = OvsFlow.ovs_flow_msg()
flow.parse(args.flow, args.acts, rep["dpifindex"])
ovsflow.add_flow(rep["dpifindex"], flow)
+ elif hasattr(args, "flsbr"):
+ rep = ovsdp.info(args.flsbr, 0)
+ if rep is None:
+ print("DP '%s' not found." % args.flsbr)
+ return 1
+ ovsflow.del_flows(rep["dpifindex"])
return 0
diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
index bb34329e02a7..99ed5bd6e840 100755
--- a/tools/testing/selftests/netfilter/nft_audit.sh
+++ b/tools/testing/selftests/netfilter/nft_audit.sh
@@ -11,6 +11,12 @@ nft --version >/dev/null 2>&1 || {
exit $SKIP_RC
}
+# Run everything in a separate network namespace
+[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
+
+# give other scripts a chance to finish - audit_logread sees all activity
+sleep 1
+
logfile=$(mktemp)
rulefile=$(mktemp)
echo "logging into $logfile"
@@ -93,6 +99,12 @@ do_test 'nft add counter t1 c1' \
do_test 'nft add counter t2 c1; add counter t2 c2' \
'table=t2 family=2 entries=2 op=nft_register_obj'
+for ((i = 3; i <= 500; i++)); do
+ echo "add counter t2 c$i"
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=498 op=nft_register_obj'
+
# adding/updating quotas
do_test 'nft add quota t1 q1 { 10 bytes }' \
@@ -101,6 +113,12 @@ do_test 'nft add quota t1 q1 { 10 bytes }' \
do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
'table=t2 family=2 entries=2 op=nft_register_obj'
+for ((i = 3; i <= 500; i++)); do
+ echo "add quota t2 q$i { 10 bytes }"
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=498 op=nft_register_obj'
+
# changing the quota value triggers obj update path
do_test 'nft add quota t1 q1 { 20 bytes }' \
'table=t1 family=2 entries=1 op=nft_register_obj'
@@ -150,6 +168,40 @@ done
do_test 'nft reset set t1 s' \
'table=t1 family=2 entries=3 op=nft_reset_setelem'
+# resetting counters
+
+do_test 'nft reset counter t1 c1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset counters t1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset counters t2' \
+'table=t2 family=2 entries=342 op=nft_reset_obj
+table=t2 family=2 entries=158 op=nft_reset_obj'
+
+do_test 'nft reset counters' \
+'table=t1 family=2 entries=1 op=nft_reset_obj
+table=t2 family=2 entries=341 op=nft_reset_obj
+table=t2 family=2 entries=159 op=nft_reset_obj'
+
+# resetting quotas
+
+do_test 'nft reset quota t1 q1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset quotas t1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset quotas t2' \
+'table=t2 family=2 entries=315 op=nft_reset_obj
+table=t2 family=2 entries=185 op=nft_reset_obj'
+
+do_test 'nft reset quotas' \
+'table=t1 family=2 entries=1 op=nft_reset_obj
+table=t2 family=2 entries=314 op=nft_reset_obj
+table=t2 family=2 entries=186 op=nft_reset_obj'
+
# deleting rules
readarray -t handles < <(nft -a list chain t1 c1 | \
diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c
index 8202f1327c39..f5575ef2007c 100644
--- a/tools/testing/selftests/user_events/abi_test.c
+++ b/tools/testing/selftests/user_events/abi_test.c
@@ -47,7 +47,7 @@ static int change_event(bool enable)
return ret;
}
-static int reg_enable(long *enable, int size, int bit)
+static int reg_enable(void *enable, int size, int bit)
{
struct user_reg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -69,7 +69,7 @@ static int reg_enable(long *enable, int size, int bit)
return ret;
}
-static int reg_disable(long *enable, int bit)
+static int reg_disable(void *enable, int bit)
{
struct user_unreg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -90,7 +90,8 @@ static int reg_disable(long *enable, int bit)
}
FIXTURE(user) {
- long check;
+ int check;
+ long check_long;
bool umount;
};
@@ -99,6 +100,7 @@ FIXTURE_SETUP(user) {
change_event(false);
self->check = 0;
+ self->check_long = 0;
}
FIXTURE_TEARDOWN(user) {
@@ -136,9 +138,9 @@ TEST_F(user, bit_sizes) {
#if BITS_PER_LONG == 8
/* Allow 0-64 bits for 64-bit */
- ASSERT_EQ(0, reg_enable(&self->check, sizeof(long), 63));
- ASSERT_NE(0, reg_enable(&self->check, sizeof(long), 64));
- ASSERT_EQ(0, reg_disable(&self->check, 63));
+ ASSERT_EQ(0, reg_enable(&self->check_long, sizeof(long), 63));
+ ASSERT_NE(0, reg_enable(&self->check_long, sizeof(long), 64));
+ ASSERT_EQ(0, reg_disable(&self->check_long, 63));
#endif
/* Disallowed sizes (everything beside 4 and 8) */
@@ -200,7 +202,7 @@ static int clone_check(void *check)
for (i = 0; i < 10; ++i) {
usleep(100000);
- if (*(long *)check)
+ if (*(int *)check)
return 0;
}
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index 834a90bd3270..822ecaa8e4df 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -24,11 +24,23 @@ enum dma_data_direction {
#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_map_single_attrs(d, p, s, dir, a) (virt_to_phys(p))
#define dma_mapping_error(...) (0)
#define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
+#define sg_dma_address(sg) (0)
+#define dma_need_sync(v, a) (0)
+#define dma_unmap_single_attrs(d, a, s, r, t) do { \
+ (void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
+} while (0)
+#define dma_sync_single_range_for_cpu(d, a, o, s, r) do { \
+ (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
+#define dma_sync_single_range_for_device(d, a, o, s, r) do { \
+ (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
#define dma_max_mapping_size(...) SIZE_MAX
#endif