Diffstat (limited to 'include')
-rw-r--r-- include/acpi/acexcep.h | 10
-rw-r--r-- include/acpi/acpixf.h | 8
-rw-r--r-- include/acpi/actbl.h | 2
-rw-r--r-- include/acpi/actbl1.h | 1
-rw-r--r-- include/acpi/actbl2.h | 21
-rw-r--r-- include/acpi/processor.h | 34
-rw-r--r-- include/asm-generic/io.h | 98
-rw-r--r-- include/asm-generic/memory_model.h | 2
-rw-r--r-- include/asm-generic/vmlinux.lds.h | 3
-rw-r--r-- include/crypto/scatterwalk.h | 4
-rw-r--r-- include/drm/bridge/dw_dp.h | 20
-rw-r--r-- include/drm/bridge/samsung-dsim.h | 16
-rw-r--r-- include/drm/drm_bridge.h | 47
-rw-r--r-- include/drm/drm_buddy.h | 9
-rw-r--r-- include/drm/drm_color_mgmt.h | 1
-rw-r--r-- include/drm/drm_device.h | 14
-rw-r--r-- include/drm/drm_format_helper.h | 4
-rw-r--r-- include/drm/drm_gem.h | 51
-rw-r--r-- include/drm/drm_gpusvm.h | 136
-rw-r--r-- include/drm/drm_gpuvm.h | 68
-rw-r--r-- include/drm/drm_mipi_dsi.h | 147
-rw-r--r-- include/drm/drm_pagemap.h | 50
-rw-r--r-- include/drm/drm_panel.h | 14
-rw-r--r-- include/drm/drm_utils.h | 8
-rw-r--r-- include/drm/intel/i915_component.h | 1
-rw-r--r-- include/drm/intel/intel_lb_mei_interface.h | 70
-rw-r--r-- include/drm/intel/pciids.h | 5
-rw-r--r-- include/drm/ttm/ttm_bo.h | 2
-rw-r--r-- include/drm/ttm/ttm_resource.h | 2
-rw-r--r-- include/dt-bindings/clock/axis,artpec8-clk.h | 169
-rw-r--r-- include/dt-bindings/clock/qcom,apss-ipq.h | 6
-rw-r--r-- include/dt-bindings/clock/qcom,dispcc-sc7280.h | 4
-rw-r--r-- include/dt-bindings/clock/raspberrypi,rp1-clocks.h | 4
-rw-r--r-- include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h | 1
-rw-r--r-- include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h | 1
-rw-r--r-- include/dt-bindings/clock/sun55i-a523-ccu.h | 1
-rw-r--r-- include/dt-bindings/clock/sun55i-a523-mcu-ccu.h | 54
-rw-r--r-- include/dt-bindings/clock/tegra30-car.h | 3
-rw-r--r-- include/dt-bindings/interconnect/qcom,ipq5424.h | 3
-rw-r--r-- include/dt-bindings/media/tvp5150.h | 2
-rw-r--r-- include/dt-bindings/memory/tegra210-mc.h | 74
-rw-r--r-- include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h | 36
-rw-r--r-- include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h | 22
-rw-r--r-- include/dt-bindings/reset/nvidia,tegra114-car.h | 13
-rw-r--r-- include/dt-bindings/reset/sun55i-a523-mcu-ccu.h | 30
-rw-r--r-- include/dt-bindings/reset/thead,th1520-reset.h | 7
-rw-r--r-- include/dt-bindings/thermal/tegra114-soctherm.h | 19
-rw-r--r-- include/kunit/test.h | 95
-rw-r--r-- include/linux/acpi_rimt.h | 28
-rw-r--r-- include/linux/alloc_tag.h | 12
-rw-r--r-- include/linux/backing-dev.h | 14
-rw-r--r-- include/linux/bio-integrity.h | 1
-rw-r--r-- include/linux/bio.h | 18
-rw-r--r-- include/linux/blk-integrity.h | 32
-rw-r--r-- include/linux/blk-mq-dma.h | 25
-rw-r--r-- include/linux/blk-mq.h | 4
-rw-r--r-- include/linux/blk_types.h | 19
-rw-r--r-- include/linux/blkdev.h | 28
-rw-r--r-- include/linux/bnxt/hsi.h | 376
-rw-r--r-- include/linux/bpfptr.h | 2
-rw-r--r-- include/linux/bvec.h | 7
-rw-r--r-- include/linux/can/bittiming.h | 48
-rw-r--r-- include/linux/can/dev.h | 66
-rw-r--r-- include/linux/can/dev/peak_canfd.h | 4
-rw-r--r-- include/linux/codetag.h | 5
-rw-r--r-- include/linux/cpufreq.h | 7
-rw-r--r-- include/linux/damon.h | 18
-rw-r--r-- include/linux/dcache.h | 5
-rw-r--r-- include/linux/device/devres.h | 2
-rw-r--r-- include/linux/dibs.h | 464
-rw-r--r-- include/linux/dma-direct.h | 2
-rw-r--r-- include/linux/dma-map-ops.h | 8
-rw-r--r-- include/linux/dma-mapping.h | 33
-rw-r--r-- include/linux/dpll.h | 6
-rw-r--r-- include/linux/ethtool.h | 27
-rw-r--r-- include/linux/exportfs.h | 2
-rw-r--r-- include/linux/f2fs_fs.h | 1
-rw-r--r-- include/linux/fbcon.h | 7
-rw-r--r-- include/linux/firewire.h | 33
-rw-r--r-- include/linux/firmware/qcom/qcom_scm.h | 6
-rw-r--r-- include/linux/firmware/qcom/qcom_tzmem.h | 15
-rw-r--r-- include/linux/freezer.h | 2
-rw-r--r-- include/linux/fs.h | 54
-rw-r--r-- include/linux/fs_context.h | 9
-rw-r--r-- include/linux/fsl/ptp_qoriq.h | 10
-rw-r--r-- include/linux/fsnotify_backend.h | 2
-rw-r--r-- include/linux/gfp.h | 2
-rw-r--r-- include/linux/habanalabs/cpucp_if.h | 4
-rw-r--r-- include/linux/hid.h | 2
-rw-r--r-- include/linux/highmem-internal.h | 36
-rw-r--r-- include/linux/highmem.h | 8
-rw-r--r-- include/linux/huge_mm.h | 112
-rw-r--r-- include/linux/hugetlb.h | 7
-rw-r--r-- include/linux/hw_bitfield.h | 62
-rw-r--r-- include/linux/i2c.h | 2
-rw-r--r-- include/linux/i3c/master.h | 26
-rw-r--r-- include/linux/idr.h | 8
-rw-r--r-- include/linux/ieee80211.h | 300
-rw-r--r-- include/linux/if_pppox.h | 2
-rw-r--r-- include/linux/inet_diag.h | 20
-rw-r--r-- include/linux/io-pgtable.h | 1
-rw-r--r-- include/linux/io_uring/cmd.h | 69
-rw-r--r-- include/linux/io_uring_types.h | 31
-rw-r--r-- include/linux/iommu-dma.h | 11
-rw-r--r-- include/linux/iopoll.h | 136
-rw-r--r-- include/linux/ipmi_smi.h | 11
-rw-r--r-- include/linux/ipv6.h | 39
-rw-r--r-- include/linux/ism.h | 28
-rw-r--r-- include/linux/kasan-enabled.h | 32
-rw-r--r-- include/linux/kasan.h | 19
-rw-r--r-- include/linux/kernel.h | 21
-rw-r--r-- include/linux/kexec.h | 5
-rw-r--r-- include/linux/kexec_handover.h | 6
-rw-r--r-- include/linux/khugepaged.h | 6
-rw-r--r-- include/linux/kmsan.h | 9
-rw-r--r-- include/linux/ksm.h | 12
-rw-r--r-- include/linux/libata.h | 2
-rw-r--r-- include/linux/list.h | 22
-rw-r--r-- include/linux/local_lock.h | 2
-rw-r--r-- include/linux/local_lock_internal.h | 16
-rw-r--r-- include/linux/lsm_hook_defs.h | 2
-rw-r--r-- include/linux/maple_tree.h | 33
-rw-r--r-- include/linux/mei_cl_bus.h | 1
-rw-r--r-- include/linux/memcontrol.h | 67
-rw-r--r-- include/linux/mempool.h | 2
-rw-r--r-- include/linux/memremap.h | 45
-rw-r--r-- include/linux/micrel_phy.h | 1
-rw-r--r-- include/linux/migrate.h | 11
-rw-r--r-- include/linux/mlx5/cq.h | 1
-rw-r--r-- include/linux/mlx5/device.h | 5
-rw-r--r-- include/linux/mlx5/driver.h | 10
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 274
-rw-r--r-- include/linux/mlx5/qp.h | 16
-rw-r--r-- include/linux/mlx5/vport.h | 2
-rw-r--r-- include/linux/mm.h | 273
-rw-r--r-- include/linux/mm_inline.h | 37
-rw-r--r-- include/linux/mm_types.h | 136
-rw-r--r-- include/linux/mman.h | 2
-rw-r--r-- include/linux/mmap_lock.h | 85
-rw-r--r-- include/linux/mmc/sdio_ids.h | 2
-rw-r--r-- include/linux/mmzone.h | 91
-rw-r--r-- include/linux/module.h | 18
-rw-r--r-- include/linux/moduleparam.h | 13
-rw-r--r-- include/linux/mount.h | 9
-rw-r--r-- include/linux/namei.h | 4
-rw-r--r-- include/linux/net/intel/libie/adminq.h | 95
-rw-r--r-- include/linux/net/intel/libie/fwlog.h | 85
-rw-r--r-- include/linux/netdevice.h | 47
-rw-r--r-- include/linux/netfs.h | 2
-rw-r--r-- include/linux/netpoll.h | 1
-rw-r--r-- include/linux/nfs_page.h | 2
-rw-r--r-- include/linux/nfs_xdr.h | 4
-rw-r--r-- include/linux/nfslocalio.h | 2
-rw-r--r-- include/linux/nvmem-provider.h | 2
-rw-r--r-- include/linux/of.h | 7
-rw-r--r-- include/linux/of_irq.h | 6
-rw-r--r-- include/linux/once.h | 4
-rw-r--r-- include/linux/oom.h | 2
-rw-r--r-- include/linux/overflow.h | 70
-rw-r--r-- include/linux/page-flags.h | 43
-rw-r--r-- include/linux/pageblock-flags.h | 12
-rw-r--r-- include/linux/pagemap.h | 67
-rw-r--r-- include/linux/pagevec.h | 4
-rw-r--r-- include/linux/panic.h | 6
-rw-r--r-- include/linux/pgalloc_tag.h | 7
-rw-r--r-- include/linux/pgtable.h | 26
-rw-r--r-- include/linux/phy.h | 53
-rw-r--r-- include/linux/phy_fixed.h | 18
-rw-r--r-- include/linux/phylink.h | 7
-rw-r--r-- include/linux/pinctrl/pinconf-generic.h | 12
-rw-r--r-- include/linux/pinctrl/pinctrl.h | 14
-rw-r--r-- include/linux/pinctrl/pinmux.h | 2
-rw-r--r-- include/linux/pm_opp.h | 30
-rw-r--r-- include/linux/poison.h | 3
-rw-r--r-- include/linux/power/max77705_charger.h | 144
-rw-r--r-- include/linux/power_supply.h | 2
-rw-r--r-- include/linux/printk.h | 2
-rw-r--r-- include/linux/property.h | 10
-rw-r--r-- include/linux/ptp_clock_kernel.h | 32
-rw-r--r-- include/linux/ptr_ring.h | 42
-rw-r--r-- include/linux/rmap.h | 67
-rw-r--r-- include/linux/rtmutex.h | 10
-rw-r--r-- include/linux/scatterlist.h | 3
-rw-r--r-- include/linux/sched/coredump.h | 18
-rw-r--r-- include/linux/sched/mm.h | 4
-rw-r--r-- include/linux/sched/task.h | 5
-rw-r--r-- include/linux/scmi_protocol.h | 2
-rw-r--r-- include/linux/screen_info.h | 2
-rw-r--r-- include/linux/security.h | 4
-rw-r--r-- include/linux/sfp.h | 48
-rw-r--r-- include/linux/shmem_fs.h | 4
-rw-r--r-- include/linux/skbuff.h | 52
-rw-r--r-- include/linux/skmsg.h | 2
-rw-r--r-- include/linux/slab.h | 90
-rw-r--r-- include/linux/soc/airoha/airoha_offload.h | 316
-rw-r--r-- include/linux/soc/mediatek/mtk_wed.h | 2
-rw-r--r-- include/linux/soc/qcom/geni-se.h | 4
-rw-r--r-- include/linux/soc/qcom/mdt_loader.h | 7
-rw-r--r-- include/linux/soundwire/sdw.h | 17
-rw-r--r-- include/linux/stmmac.h | 35
-rw-r--r-- include/linux/sunrpc/debug.h | 30
-rw-r--r-- include/linux/sunrpc/svc.h | 4
-rw-r--r-- include/linux/sunrpc/svc_xprt.h | 3
-rw-r--r-- include/linux/sunrpc/xdr.h | 8
-rw-r--r-- include/linux/suspend.h | 6
-rw-r--r-- include/linux/swap.h | 50
-rw-r--r-- include/linux/tcp.h | 52
-rw-r--r-- include/linux/tee_core.h | 113
-rw-r--r-- include/linux/tee_drv.h | 22
-rw-r--r-- include/linux/udp.h | 9
-rw-r--r-- include/linux/uio.h | 2
-rw-r--r-- include/linux/usb/uvc.h | 22
-rw-r--r-- include/linux/videodev2.h | 2
-rw-r--r-- include/linux/vm_event_item.h | 2
-rw-r--r-- include/linux/vmalloc.h | 12
-rw-r--r-- include/linux/wait.h | 12
-rw-r--r-- include/linux/writeback.h | 6
-rw-r--r-- include/linux/zpool.h | 86
-rw-r--r-- include/media/cadence/cdns-csi2rx.h | 19
-rw-r--r-- include/media/drv-intf/cx25840.h | 2
-rw-r--r-- include/media/drv-intf/msp3400.h | 2
-rw-r--r-- include/media/i2c/bt819.h | 2
-rw-r--r-- include/media/i2c/cs5345.h | 2
-rw-r--r-- include/media/i2c/cs53l32a.h | 2
-rw-r--r-- include/media/i2c/m52790.h | 2
-rw-r--r-- include/media/i2c/mt9v011.h | 2
-rw-r--r-- include/media/i2c/mt9v022.h | 13
-rw-r--r-- include/media/i2c/mt9v032.h | 12
-rw-r--r-- include/media/i2c/saa7115.h | 2
-rw-r--r-- include/media/i2c/saa7127.h | 2
-rw-r--r-- include/media/i2c/ths7303.h | 2
-rw-r--r-- include/media/i2c/tvaudio.h | 2
-rw-r--r-- include/media/i2c/upd64031a.h | 2
-rw-r--r-- include/media/i2c/upd64083.h | 2
-rw-r--r-- include/media/i2c/wm8775.h | 2
-rw-r--r-- include/media/media-request.h | 2
-rw-r--r-- include/media/v4l2-common.h | 103
-rw-r--r-- include/media/v4l2-ctrls.h | 6
-rw-r--r-- include/media/v4l2-dev.h | 2
-rw-r--r-- include/media/v4l2-device.h | 2
-rw-r--r-- include/media/v4l2-dv-timings.h | 1
-rw-r--r-- include/media/v4l2-fh.h | 30
-rw-r--r-- include/media/v4l2-ioctl.h | 238
-rw-r--r-- include/media/v4l2-mem2mem.h | 42
-rw-r--r-- include/media/v4l2-subdev.h | 57
-rw-r--r-- include/net/act_api.h | 14
-rw-r--r-- include/net/bluetooth/bluetooth.h | 3
-rw-r--r-- include/net/bluetooth/hci.h | 1
-rw-r--r-- include/net/bluetooth/hci_core.h | 11
-rw-r--r-- include/net/bluetooth/hci_drv.h | 2
-rw-r--r-- include/net/bluetooth/mgmt.h | 9
-rw-r--r-- include/net/bond_3ad.h | 2
-rw-r--r-- include/net/bond_options.h | 1
-rw-r--r-- include/net/bonding.h | 2
-rw-r--r-- include/net/cfg80211.h | 282
-rw-r--r-- include/net/cls_cgroup.h | 2
-rw-r--r-- include/net/devlink.h | 24
-rw-r--r-- include/net/dropreason-core.h | 6
-rw-r--r-- include/net/dst.h | 16
-rw-r--r-- include/net/flow.h | 11
-rw-r--r-- include/net/genetlink.h | 2
-rw-r--r-- include/net/gro.h | 32
-rw-r--r-- include/net/hotdata.h | 7
-rw-r--r-- include/net/icmp.h | 10
-rw-r--r-- include/net/inet6_hashtables.h | 20
-rw-r--r-- include/net/inet_connection_sock.h | 13
-rw-r--r-- include/net/inet_dscp.h | 6
-rw-r--r-- include/net/inet_hashtables.h | 40
-rw-r--r-- include/net/inet_timewait_sock.h | 11
-rw-r--r-- include/net/ip.h | 15
-rw-r--r-- include/net/ip6_route.h | 10
-rw-r--r-- include/net/ip_fib.h | 2
-rw-r--r-- include/net/ip_tunnels.h | 4
-rw-r--r-- include/net/libeth/xdp.h | 11
-rw-r--r-- include/net/mac80211.h | 10
-rw-r--r-- include/net/mana/mana.h | 4
-rw-r--r-- include/net/netdev_queues.h | 9
-rw-r--r-- include/net/netfilter/ipv4/nf_reject.h | 8
-rw-r--r-- include/net/netfilter/ipv6/nf_reject.h | 10
-rw-r--r-- include/net/netfilter/nf_tables.h | 2
-rw-r--r-- include/net/netfilter/nf_tables_core.h | 2
-rw-r--r-- include/net/netns/ipv4.h | 3
-rw-r--r-- include/net/netns/sctp.h | 4
-rw-r--r-- include/net/nfc/nci_core.h | 2
-rw-r--r-- include/net/page_pool/helpers.h | 17
-rw-r--r-- include/net/ping.h | 1
-rw-r--r-- include/net/proto_memory.h | 4
-rw-r--r-- include/net/psp.h | 12
-rw-r--r-- include/net/psp/functions.h | 209
-rw-r--r-- include/net/psp/types.h | 184
-rw-r--r-- include/net/raw.h | 1
-rw-r--r-- include/net/request_sock.h | 2
-rw-r--r-- include/net/route.h | 4
-rw-r--r-- include/net/rps.h | 92
-rw-r--r-- include/net/sctp/auth.h | 17
-rw-r--r-- include/net/sctp/constants.h | 9
-rw-r--r-- include/net/sctp/structs.h | 35
-rw-r--r-- include/net/seg6_hmac.h | 20
-rw-r--r-- include/net/smc.h | 51
-rw-r--r-- include/net/snmp.h | 5
-rw-r--r-- include/net/sock.h | 135
-rw-r--r-- include/net/tc_act/tc_skbmod.h | 1
-rw-r--r-- include/net/tc_act/tc_tunnel_key.h | 1
-rw-r--r-- include/net/tc_act/tc_vlan.h | 1
-rw-r--r-- include/net/tcp.h | 108
-rw-r--r-- include/net/tcp_ao.h | 1
-rw-r--r-- include/net/tcp_ecn.h | 642
-rw-r--r-- include/net/timewait_sock.h | 7
-rw-r--r-- include/net/udp.h | 20
-rw-r--r-- include/net/xdp.h | 64
-rw-r--r-- include/rdma/ib_mad.h | 1
-rw-r--r-- include/rdma/ib_sa.h | 37
-rw-r--r-- include/rdma/rdma_cm.h | 21
-rw-r--r-- include/scsi/libsas.h | 10
-rw-r--r-- include/scsi/scsi_host.h | 2
-rw-r--r-- include/scsi/scsicam.h | 7
-rw-r--r-- include/soc/at91/sama7-sfrbu.h | 7
-rw-r--r-- include/soc/rockchip/rk3588_grf.h | 8
-rw-r--r-- include/soc/rockchip/rockchip_grf.h | 1
-rw-r--r-- include/sound/compress_driver.h | 2
-rw-r--r-- include/sound/cs-amp-lib.h | 1
-rw-r--r-- include/sound/cs35l56.h | 5
-rw-r--r-- include/sound/dmaengine_pcm.h | 5
-rw-r--r-- include/sound/emu10k1.h | 3
-rw-r--r-- include/sound/gus.h | 1
-rw-r--r-- include/sound/hda_codec.h | 34
-rw-r--r-- include/sound/hdaudio.h | 1
-rw-r--r-- include/sound/soc-component.h | 83
-rw-r--r-- include/sound/soc-dai.h | 7
-rw-r--r-- include/sound/soc-dapm.h | 61
-rw-r--r-- include/sound/soc.h | 5
-rw-r--r-- include/sound/soc_sdw_utils.h | 8
-rw-r--r-- include/sound/sof/ipc4/header.h | 4
-rw-r--r-- include/sound/soundfont.h | 18
-rw-r--r-- include/sound/tas2781-dsp.h | 11
-rw-r--r-- include/sound/tas2781.h | 14
-rw-r--r-- include/sound/tas2x20-tlv.h | 259
-rw-r--r-- include/sound/tas5825-tlv.h | 24
-rw-r--r-- include/sound/tlv320dac33-plat.h | 21
-rw-r--r-- include/trace/events/cma.h | 19
-rw-r--r-- include/trace/events/dma.h | 9
-rw-r--r-- include/trace/events/fib.h | 4
-rw-r--r-- include/trace/events/habanalabs.h | 2
-rw-r--r-- include/trace/events/huge_memory.h | 19
-rw-r--r-- include/trace/events/io_uring.h | 4
-rw-r--r-- include/trace/events/kmem.h | 5
-rw-r--r-- include/trace/events/page_ref.h | 4
-rw-r--r-- include/trace/events/readahead.h | 132
-rw-r--r-- include/trace/misc/fs.h | 22
-rw-r--r-- include/uapi/drm/amdgpu_drm.h | 72
-rw-r--r-- include/uapi/drm/amdxdna_accel.h | 136
-rw-r--r-- include/uapi/drm/drm.h | 63
-rw-r--r-- include/uapi/drm/drm_mode.h | 8
-rw-r--r-- include/uapi/drm/panthor_drm.h | 3
-rw-r--r-- include/uapi/drm/rocket_accel.h | 142
-rw-r--r-- include/uapi/drm/v3d_drm.h | 2
-rw-r--r-- include/uapi/drm/xe_drm.h | 282
-rw-r--r-- include/uapi/linux/aspeed-video.h | 7
-rw-r--r-- include/uapi/linux/can/netlink.h | 14
-rw-r--r-- include/uapi/linux/devlink.h | 2
-rw-r--r-- include/uapi/linux/dpll.h | 1
-rw-r--r-- include/uapi/linux/ethtool.h | 1
-rw-r--r-- include/uapi/linux/ethtool_netlink_generated.h | 12
-rw-r--r-- include/uapi/linux/ext4.h | 53
-rw-r--r-- include/uapi/linux/fuse.h | 22
-rw-r--r-- include/uapi/linux/if_bridge.h | 3
-rw-r--r-- include/uapi/linux/if_link.h | 3
-rw-r--r-- include/uapi/linux/io_uring.h | 38
-rw-r--r-- include/uapi/linux/io_uring/query.h | 41
-rw-r--r-- include/uapi/linux/ivtv.h | 2
-rw-r--r-- include/uapi/linux/kexec.h | 4
-rw-r--r-- include/uapi/linux/mempolicy.h | 12
-rw-r--r-- include/uapi/linux/mptcp.h | 22
-rw-r--r-- include/uapi/linux/mptcp_pm.h | 4
-rw-r--r-- include/uapi/linux/netfilter/nf_tables.h | 2
-rw-r--r-- include/uapi/linux/nl80211.h | 255
-rw-r--r-- include/uapi/linux/prctl.h | 10
-rw-r--r-- include/uapi/linux/psp.h | 66
-rw-r--r-- include/uapi/linux/ptp_clock.h | 4
-rw-r--r-- include/uapi/linux/stddef.h | 2
-rw-r--r-- include/uapi/linux/tcp.h | 9
-rw-r--r-- include/uapi/linux/tee.h | 87
-rw-r--r-- include/uapi/linux/v4l2-controls.h | 121
-rw-r--r-- include/uapi/linux/v4l2-dv-timings.h | 2
-rw-r--r-- include/uapi/linux/videodev2.h | 20
-rw-r--r-- include/uapi/rdma/ib_user_ioctl_verbs.h | 1
-rw-r--r-- include/uapi/rdma/ib_user_sa.h | 14
-rw-r--r-- include/uapi/rdma/ionic-abi.h | 115
-rw-r--r-- include/uapi/rdma/irdma-abi.h | 16
-rw-r--r-- include/uapi/rdma/rdma_user_cm.h | 42
-rw-r--r-- include/uapi/scsi/fc/fc_els.h | 58
-rw-r--r-- include/uapi/sound/compress_offload.h | 35
-rw-r--r-- include/uapi/sound/compress_params.h | 41
-rw-r--r-- include/uapi/sound/intel/avs/tokens.h | 15
-rw-r--r-- include/uapi/sound/snd_ar_tokens.h | 20
-rw-r--r-- include/uapi/sound/sof/tokens.h | 2
-rw-r--r-- include/ufs/ufs.h | 17
-rw-r--r-- include/ufs/ufs_quirks.h | 3
-rw-r--r-- include/ufs/ufshcd.h | 35
-rw-r--r-- include/video/pixel_format.h | 61
400 files changed, 10084 insertions, 2621 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 53c98f5fe3c3..a2db36d18419 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -173,8 +173,10 @@ struct acpi_exception_info {
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
+#define AE_AML_TOO_FEW_ARGUMENTS EXCEP_AML (0x0026)
+#define AE_AML_TOO_MANY_ARGUMENTS EXCEP_AML (0x0027)
-#define AE_CODE_AML_MAX 0x0025
+#define AE_CODE_AML_MAX 0x0027
/*
* Internal exceptions used for control
@@ -353,7 +355,11 @@ static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
"A target operand of an incorrect type was encountered"),
EXCEP_TXT("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
EXCEP_TXT("AE_AML_BUFFER_LENGTH",
- "The length of the buffer is invalid/incorrect")
+ "The length of the buffer is invalid/incorrect"),
+ EXCEP_TXT("AE_AML_TOO_FEW_ARGUMENTS",
+ "There are fewer than expected method arguments"),
+ EXCEP_TXT("AE_AML_TOO_MANY_ARGUMENTS",
+ "There are too many arguments for this method")
};
static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
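The two new status codes slot into the existing AE_AML_* numbering, which is why AE_CODE_AML_MAX moves from 0x0025 to 0x0027. As a hedged sketch (the handle, method name and buffers below are hypothetical, not from this patch), a caller could surface the new exceptions through ACPICA's existing acpi_format_exception():

	acpi_status status = acpi_evaluate_object(handle, "_DSM", &args, &buffer);

	if (status == AE_AML_TOO_FEW_ARGUMENTS ||
	    status == AE_AML_TOO_MANY_ARGUMENTS)
		pr_err("ACPI: %s\n", acpi_format_exception(status));
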
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b49396aa4058..e65a2afe9250 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20250404
+#define ACPI_CA_VERSION 0x20250807
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -214,6 +214,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
+ * The ACPI Global Lock is mainly used on systems with SMM, so systems
+ * without SMM (such as loong_arch) may not have it and need not use it.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE);
+
+/*
* Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
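A minimal sketch of how a port might use the new global, assuming (this is not shown in the diff) that it is cleared before ACPICA initialization; the arch hook below is hypothetical, only acpi_gbl_use_global_lock comes from the header:

	void __init arch_acpi_setup(void)
	{
		/* No SMM on this platform, so never take the Global Lock. */
		acpi_gbl_use_global_lock = FALSE;
	}
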
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 243097a3da63..8a67d4ea6e3f 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -73,7 +73,7 @@ struct acpi_table_header {
char oem_id[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
- char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+ char asl_compiler_id[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
};
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 99fd1588ff38..0b4c332df25c 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -565,6 +565,7 @@ struct acpi_cedt_cfmws_target_element {
#define ACPI_CEDT_CFMWS_RESTRICT_VOLATILE (1<<2)
#define ACPI_CEDT_CFMWS_RESTRICT_PMEM (1<<3)
#define ACPI_CEDT_CFMWS_RESTRICT_FIXED (1<<4)
+#define ACPI_CEDT_CFMWS_RESTRICT_BI (1<<5)
/* 2: CXL XOR Interleave Math Structure */
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 048f5f47f8b8..f726bce3eb84 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -57,6 +57,7 @@
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_SWFT "SWFT" /* SoundWire File Table */
#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
/*
@@ -3479,6 +3480,26 @@ enum acpi_svkl_format {
};
/*******************************************************************************
+ * SWFT - SoundWire File Table
+ *
+ * Conforms to "Discovery and Configuration (DisCo) Specification for SoundWire"
+ * Version 2.1, 2 October 2023
+ *
+ ******************************************************************************/
+struct acpi_sw_file {
+ u16 vendor_id;
+ u32 file_id;
+ u16 file_version;
+ u32 file_length;
+ u8 data[];
+};
+
+struct acpi_table_swft {
+ struct acpi_table_header header;
+ struct acpi_sw_file files[];
+};
+
+/*******************************************************************************
*
* TDEL - TD-Event Log
* From: "Guest-Host-Communication Interface (GHCI) for Intel
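Since each struct acpi_sw_file ends in a flexible data[] array, consumers presumably walk the SWFT table by advancing past the fixed header plus file_length bytes per entry. A sketch under that assumption (that file_length counts only the data[] payload is not confirmed by the header alone):

	static void swft_for_each_file(struct acpi_table_swft *swft)
	{
		u8 *p = (u8 *)swft->files;
		u8 *end = (u8 *)swft + swft->header.length;

		while (p + sizeof(struct acpi_sw_file) <= end) {
			struct acpi_sw_file *f = (struct acpi_sw_file *)p;

			pr_info("SWFT: vendor %#x file %#x version %u len %u\n",
				f->vendor_id, f->file_id, f->file_version,
				f->file_length);
			p += sizeof(struct acpi_sw_file) + f->file_length;
		}
	}
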
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index d0eccbd920e5..7146a8e9e9c2 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -417,32 +417,15 @@ static inline void acpi_processor_throttling_init(void) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
/* in processor_idle.c */
-extern struct cpuidle_driver acpi_idle_driver;
#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-int acpi_processor_power_init(struct acpi_processor *pr);
-int acpi_processor_power_exit(struct acpi_processor *pr);
+void acpi_processor_power_init(struct acpi_processor *pr);
+void acpi_processor_power_exit(struct acpi_processor *pr);
int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
-#else
-static inline int acpi_processor_power_init(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_power_exit(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
-
-static inline int acpi_processor_hotplug(struct acpi_processor *pr)
-{
- return -ENODEV;
-}
+void acpi_processor_register_idle_driver(void);
+void acpi_processor_unregister_idle_driver(void);
+int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
/* in processor_thermal.c */
@@ -465,11 +448,6 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
}
#endif /* CONFIG_CPU_FREQ */
-#ifdef CONFIG_ACPI_PROCESSOR_IDLE
-extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
-extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
-#endif
-
void acpi_processor_init_invariance_cppc(void);
#endif
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 11abad6c87e1..ca5a1ce6f0f8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -75,6 +75,7 @@
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>
+#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
@@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
#else
+#define rwmmio_tracepoint_enabled(tracepoint) false
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
@@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
- log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
- log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
- log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
- log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
- log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
- log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
- log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
- log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -249,11 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
- log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -261,11 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
- log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -273,11 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
- log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
- log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -306,9 +324,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
u8 val;
- log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
val = __raw_readb(addr);
- log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
u16 val;
- log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
- log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
u32 val;
- log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
- log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
u64 val;
- log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
- log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -356,9 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__raw_writeb(value, addr);
- log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -366,9 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__raw_writew((u16 __force)cpu_to_le16(value), addr);
- log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -376,9 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
- log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -386,9 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
- log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
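
The repeated pattern above gates every log_*_mmio() call behind tracepoint_enabled(), which tests the tracepoint's static key inline; when tracing is off, the out-of-line logging call is never made. A condensed sketch of the shape (not the actual accessor code):

	DECLARE_TRACEPOINT(rwmmio_read);

	static inline u8 traced_readb(const volatile void __iomem *addr)
	{
		/* Static-key test: effectively a patched NOP when disabled. */
		if (tracepoint_enabled(rwmmio_read))
			log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
		return __raw_readb(addr);
	}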
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 74d0077cc5fa..efa6610acbc7 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -53,7 +53,7 @@ static inline int pfn_valid(unsigned long pfn)
*/
#define __page_to_pfn(pg) \
({ const struct page *__pg = (pg); \
- int __sec = page_to_section(__pg); \
+ int __sec = memdesc_section(__pg->flags); \
(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
})
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a65a87366c48..8a9a2e732a65 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -361,6 +361,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
__start_once = .; \
*(.data..once) \
__end_once = .; \
+ *(.data..do_once) \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
@@ -831,6 +832,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
/* Required sections not related to debugging. */
#define ELF_DETAILS \
+ .modinfo : { *(.modinfo) } \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
@@ -1044,7 +1046,6 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
*(.discard.*) \
*(.export_symbol) \
*(.no_trim_symbol) \
- *(.modinfo) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 15ab743f68c8..83d14376ff2b 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -159,7 +159,7 @@ static inline void scatterwalk_map(struct scatter_walk *walk)
if (IS_ENABLED(CONFIG_HIGHMEM)) {
struct page *page;
- page = nth_page(base_page, offset >> PAGE_SHIFT);
+ page = base_page + (offset >> PAGE_SHIFT);
offset = offset_in_page(offset);
addr = kmap_local_page(page) + offset;
} else {
@@ -259,7 +259,7 @@ static inline void scatterwalk_done_dst(struct scatter_walk *walk,
end += (offset_in_page(offset) + offset_in_page(nbytes) +
PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = start; i < end; i++)
- flush_dcache_page(nth_page(base_page, i));
+ flush_dcache_page(base_page + i);
}
scatterwalk_advance(walk, nbytes);
}
diff --git a/include/drm/bridge/dw_dp.h b/include/drm/bridge/dw_dp.h
new file mode 100644
index 000000000000..d05df49fd884
--- /dev/null
+++ b/include/drm/bridge/dw_dp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef __DW_DP__
+#define __DW_DP__
+
+#include <linux/device.h>
+
+struct drm_encoder;
+struct dw_dp;
+
+struct dw_dp_plat_data {
+ u32 max_link_rate;
+};
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data);
+#endif /* __DW_DP__ */
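A hedged usage sketch for the new header: a SoC-specific encoder driver binding the bridge. Only dw_dp_bind() and struct dw_dp_plat_data come from the header; the caller, the error convention and the unit of max_link_rate are assumptions:

	static const struct dw_dp_plat_data plat_data = {
		.max_link_rate = 810000,	/* assumed: kHz per lane */
	};

	static int encoder_bind_dp(struct device *dev, struct drm_encoder *encoder)
	{
		/* assuming dw_dp_bind() returns an ERR_PTR() on failure */
		struct dw_dp *dp = dw_dp_bind(dev, encoder, &plat_data);

		return PTR_ERR_OR_ZERO(dp);
	}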
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index 9764d6eb5beb..31d7ed589233 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -29,6 +29,7 @@ enum samsung_dsim_type {
DSIM_TYPE_EXYNOS5410,
DSIM_TYPE_EXYNOS5422,
DSIM_TYPE_EXYNOS5433,
+ DSIM_TYPE_EXYNOS7870,
DSIM_TYPE_IMX8MM,
DSIM_TYPE_IMX8MP,
DSIM_TYPE_COUNT,
@@ -53,15 +54,29 @@ struct samsung_dsim_transfer {
struct samsung_dsim_driver_data {
const unsigned int *reg_ofs;
unsigned int plltmr_reg;
+ unsigned int has_legacy_status_reg:1;
unsigned int has_freqband:1;
unsigned int has_clklane_stop:1;
unsigned int has_broken_fifoctrl_emptyhdr:1;
+ unsigned int has_sfrctrl:1;
+ struct clk_bulk_data *clk_data;
unsigned int num_clks;
unsigned int min_freq;
unsigned int max_freq;
+ unsigned int wait_for_hdr_fifo;
unsigned int wait_for_reset;
unsigned int num_bits_resol;
+ unsigned int video_mode_bit;
+ unsigned int pll_stable_bit;
+ unsigned int esc_clken_bit;
+ unsigned int byte_clken_bit;
+ unsigned int tx_req_hsclk_bit;
+ unsigned int lane_esc_clk_bit;
+ unsigned int lane_esc_data_offset;
unsigned int pll_p_offset;
+ unsigned int pll_m_offset;
+ unsigned int pll_s_offset;
+ unsigned int main_vsa_offset;
const unsigned int *reg_values;
unsigned int pll_fin_min;
unsigned int pll_fin_max;
@@ -91,7 +106,6 @@ struct samsung_dsim {
void __iomem *reg_base;
struct phy *phy;
- struct clk **clks;
struct clk *pll_clk;
struct regulator_bulk_data supplies[2];
int irq;
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index b0e6653ee42e..76e05930f50e 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,6 +23,7 @@
#ifndef __DRM_BRIDGE_H__
#define __DRM_BRIDGE_H__
+#include <linux/cleanup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -1171,6 +1172,10 @@ struct drm_bridge {
*/
bool pre_enable_prev_first;
/**
+ * @support_hdcp: Indicate that the bridge supports HDCP.
+ */
+ bool support_hdcp;
+ /**
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
@@ -1276,6 +1281,9 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
void drm_bridge_put(struct drm_bridge *bridge);
+/* Cleanup action for use with __free() */
+DEFINE_FREE(drm_bridge_put, struct drm_bridge *, if (_T) drm_bridge_put(_T))
+
void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
const struct drm_bridge_funcs *funcs);
@@ -1313,6 +1321,11 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
}
#endif
+static inline bool drm_bridge_is_last(struct drm_bridge *bridge)
+{
+ return list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain);
+}
+
/**
* drm_bridge_get_current_state() - Get the current bridge state
* @bridge: bridge object
@@ -1365,6 +1378,13 @@ drm_bridge_get_next_bridge(struct drm_bridge *bridge)
* drm_bridge_get_prev_bridge() - Get the previous bridge in the chain
* @bridge: bridge object
*
+ * The caller is responsible for holding a reference to @bridge via
+ * drm_bridge_get() or equivalent. This function leaves the refcount of
+ * @bridge unmodified.
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the previous bridge in the chain, or NULL if @bridge is the first.
*/
@@ -1374,13 +1394,16 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain))
return NULL;
- return list_prev_entry(bridge, chain_node);
+ return drm_bridge_get(list_prev_entry(bridge, chain_node));
}
/**
* drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain
* @encoder: encoder object
*
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
* RETURNS:
* the first bridge in the chain, or NULL if @encoder has no bridge attached
* to it.
@@ -1388,8 +1411,26 @@ drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
static inline struct drm_bridge *
drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder)
{
- return list_first_entry_or_null(&encoder->bridge_chain,
- struct drm_bridge, chain_node);
+ return drm_bridge_get(list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
+}
+
+/**
+ * drm_bridge_chain_get_last_bridge() - Get the last bridge in the chain
+ * @encoder: encoder object
+ *
+ * The refcount of the returned bridge is incremented. Use drm_bridge_put()
+ * when done with it.
+ *
+ * RETURNS:
+ * the last bridge in the chain, or NULL if @encoder has no bridge attached
+ * to it.
+ */
+static inline struct drm_bridge *
+drm_bridge_chain_get_last_bridge(struct drm_encoder *encoder)
+{
+ return drm_bridge_get(list_last_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
}
/**
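The new DEFINE_FREE() entry enables scope-based cleanup of bridge references. A minimal sketch (the helper itself is hypothetical): the reference taken by drm_bridge_chain_get_first_bridge() is dropped automatically when the variable leaves scope, including on early returns:

	static bool encoder_first_bridge_has_hdcp(struct drm_encoder *encoder)
	{
		struct drm_bridge *bridge __free(drm_bridge_put) =
			drm_bridge_chain_get_first_bridge(encoder);

		return bridge && bridge->support_hdcp;
	}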
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 513837632b7d..04afd7c21a82 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -13,15 +13,6 @@
#include <drm/drm_print.h>
-#define range_overflows(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ >= max__ || size__ > max__ - start__; \
-})
-
#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index 6cb577f6dba6..eccb71ab335a 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -143,6 +143,7 @@ void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_ga
void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
drm_crtc_set_lut_func set_palette);
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
#endif
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index a33aedd5e9ec..778b2cca6c49 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -26,10 +26,14 @@ struct pci_controller;
* Recovery methods for wedged device in order of less to more side-effects.
* To be used with drm_dev_wedged_event() as recovery @method. Callers can
* use any one, multiple (or'd) or none depending on their needs.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
*/
#define DRM_WEDGE_RECOVERY_NONE BIT(0) /* optional telemetry collection */
#define DRM_WEDGE_RECOVERY_REBIND BIT(1) /* unbind + bind driver */
#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
+#define DRM_WEDGE_RECOVERY_VENDOR BIT(3) /* vendor specific recovery method */
/**
* struct drm_wedge_task_info - information about the guilty task of a wedge dev
@@ -189,16 +193,6 @@ struct drm_device {
char *unique;
/**
- * @struct_mutex:
- *
- * Lock for others (not &drm_minor.master and &drm_file.is_master)
- *
- * TODO: This lock used to be the BKL of the DRM subsystem. Move the
- * lock into i915, which is the only remaining user.
- */
- struct mutex struct_mutex;
-
- /**
* @master_mutex:
*
* Lock for &drm_minor.master and &drm_file.is_master
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 562bc383ece4..32d57d6c5327 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -136,4 +136,8 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index d3a7b43e2c63..8d48d2af2649 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -398,19 +398,34 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
- * @gpuva:
+ * @gpuva: Fields used by GPUVM to manage mappings pointing to this GEM object.
*
- * Provides the list of GPU VAs attached to this GEM object.
+ * When DRM_GPUVM_IMMEDIATE_MODE is set, this list is protected by the
+ * &drm_gem_object.gpuva.lock mutex. Otherwise, the list is protected by
+ * the GEM's &dma_resv lock.
*
- * Drivers should lock list accesses with the GEMs &dma_resv lock
- * (&drm_gem_object.resv) or a custom lock if one is provided.
+ * Note that all entries in this list must agree on whether
+ * DRM_GPUVM_IMMEDIATE_MODE is set.
*/
struct {
+ /**
+ * @gpuva.list: list of GPUVM mappings attached to this GEM object.
+ *
+ * Drivers should lock list accesses with either the GEMs
+ * &dma_resv lock (&drm_gem_object.resv) or the
+ * &drm_gem_object.gpuva.lock mutex.
+ */
struct list_head list;
-#ifdef CONFIG_LOCKDEP
- struct lockdep_map *lock_dep_map;
-#endif
+ /**
+ * @gpuva.lock: lock protecting access to &drm_gem_object.gpuva.list
+ * when DRM_GPUVM_IMMEDIATE_MODE is used.
+ *
+ * Only used when DRM_GPUVM_IMMEDIATE_MODE is set. It should be
+ * safe to take this mutex during the fence signalling path, so
+ * do not allocate memory while holding this lock. Otherwise,
+ * the &dma_resv lock should be used.
+ */
+ struct mutex lock;
} gpuva;
/**
@@ -595,26 +610,12 @@ static inline bool drm_gem_is_imported(const struct drm_gem_object *obj)
}
#ifdef CONFIG_LOCKDEP
-/**
- * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
- * @obj: the &drm_gem_object
- * @lock: the lock used to protect the gpuva list. The locking primitive
- * must contain a dep_map field.
- *
- * Call this if you're not proctecting access to the gpuva list with the
- * dma-resv lock, but with a custom lock.
- */
-#define drm_gem_gpuva_set_lock(obj, lock) \
- if (!WARN((obj)->gpuva.lock_dep_map, \
- "GEM GPUVA lock should be set only once.")) \
- (obj)->gpuva.lock_dep_map = &(lock)->dep_map
-#define drm_gem_gpuva_assert_lock_held(obj) \
- lockdep_assert((obj)->gpuva.lock_dep_map ? \
- lock_is_held((obj)->gpuva.lock_dep_map) : \
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) \
+ lockdep_assert(drm_gpuvm_immediate_mode(gpuvm) ? \
+ lockdep_is_held(&(obj)->gpuva.lock) : \
dma_resv_held((obj)->resv))
#else
-#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
-#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(gpuvm, obj) do {} while (0)
#endif
/**
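A sketch of the reworked locking contract, assuming a VM created with DRM_GPUVM_IMMEDIATE_MODE (the driver code is hypothetical): the gpuva list is walked under the new per-GEM mutex rather than the dma-resv lock, and the two-argument assert checks whichever lock the VM's mode requires:

	static void driver_walk_gpuva(struct drm_gpuvm *gpuvm,
				      struct drm_gem_object *obj)
	{
		struct drm_gpuvm_bo *vm_bo;

		mutex_lock(&obj->gpuva.lock);	/* fence-signalling safe */
		drm_gem_gpuva_assert_lock_held(gpuvm, obj);
		drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
			/* inspect each vm_bo; do not allocate memory here */
		}
		mutex_unlock(&obj->gpuva.lock);
	}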
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 4aedc5423aff..5434048a2ca4 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -17,7 +17,7 @@ struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
struct drm_pagemap;
-struct drm_pagemap_device_addr;
+struct drm_pagemap_addr;
/**
* struct drm_gpusvm_ops - Operations structure for GPU SVM
@@ -106,16 +106,16 @@ struct drm_gpusvm_notifier {
};
/**
- * struct drm_gpusvm_range_flags - Structure representing a GPU SVM range flags
+ * struct drm_gpusvm_pages_flags - Structure representing a GPU SVM pages flags
*
- * @migrate_devmem: Flag indicating whether the range can be migrated to device memory
- * @unmapped: Flag indicating if the range has been unmapped
- * @partial_unmap: Flag indicating if the range has been partially unmapped
- * @has_devmem_pages: Flag indicating if the range has devmem pages
- * @has_dma_mapping: Flag indicating if the range has a DMA mapping
- * @__flags: Flags for range in u16 form (used for READ_ONCE)
+ * @migrate_devmem: Flag indicating whether the pages can be migrated to device memory
+ * @unmapped: Flag indicating if the pages have been unmapped
+ * @partial_unmap: Flag indicating if the pages have been partially unmapped
+ * @has_devmem_pages: Flag indicating if the pages have devmem pages
+ * @has_dma_mapping: Flag indicating if the pages have a DMA mapping
+ * @__flags: Flags for pages in u16 form (used for READ_ONCE)
*/
-struct drm_gpusvm_range_flags {
+struct drm_gpusvm_pages_flags {
union {
struct {
/* All flags below must be set upon creation */
@@ -131,6 +131,27 @@ struct drm_gpusvm_range_flags {
};
/**
+ * struct drm_gpusvm_pages - Structure representing a GPU SVM mapped pages
+ *
+ * @dma_addr: Device address array
+ * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
+ * Note this is assuming only one drm_pagemap per range is allowed.
+ * @notifier_seq: Notifier sequence number of the range's pages
+ * @flags: Flags for the pages
+ * @flags.migrate_devmem: Flag indicating whether the pages can be migrated to device memory
+ * @flags.unmapped: Flag indicating if the pages have been unmapped
+ * @flags.partial_unmap: Flag indicating if the pages have been partially unmapped
+ * @flags.has_devmem_pages: Flag indicating if the pages have devmem pages
+ * @flags.has_dma_mapping: Flag indicating if the pages have a DMA mapping
+ */
+struct drm_gpusvm_pages {
+ struct drm_pagemap_addr *dma_addr;
+ struct drm_pagemap *dpagemap;
+ unsigned long notifier_seq;
+ struct drm_gpusvm_pages_flags flags;
+};
+
+/**
* struct drm_gpusvm_range - Structure representing a GPU SVM range
*
* @gpusvm: Pointer to the GPU SVM structure
@@ -138,11 +159,7 @@ struct drm_gpusvm_range_flags {
* @refcount: Reference count for the range
* @itree: Interval tree node for the range (inserted in GPU SVM notifier)
* @entry: List entry to fast interval tree traversal
- * @notifier_seq: Notifier sequence number of the range's pages
- * @dma_addr: Device address array
- * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
- * Note this is assuming only one drm_pagemap per range is allowed.
- * @flags: Flags for range
+ * @pages: The pages for this range.
*
* This structure represents a GPU SVM range used for tracking memory ranges
* mapped in a DRM device.
@@ -153,10 +170,7 @@ struct drm_gpusvm_range {
struct kref refcount;
struct interval_tree_node itree;
struct list_head entry;
- unsigned long notifier_seq;
- struct drm_pagemap_device_addr *dma_addr;
- struct drm_pagemap *dpagemap;
- struct drm_gpusvm_range_flags flags;
+ struct drm_gpusvm_pages pages;
};
/**
@@ -282,6 +296,10 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end);
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end);
+
struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
unsigned long end);
@@ -289,6 +307,22 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
const struct mmu_notifier_range *mmu_range);
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx);
+
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages);
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
@@ -434,4 +468,70 @@ __drm_gpusvm_range_next(struct drm_gpusvm_range *range)
(range__) && (drm_gpusvm_range_start(range__) < (end__)); \
(range__) = __drm_gpusvm_range_next(range__))
+/**
+ * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
+ * @range__: Iterator variable for the ranges
+ * @next__: Iterator variable for temporary storage of the next range
+ * @notifier__: Pointer to the GPU SVM notifier
+ * @start__: Start address of the range
+ * @end__: End address of the range
+ *
+ * This macro is used to iterate over GPU SVM ranges in a notifier while
+ * removing ranges from it.
+ */
+#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
+ for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_range_next(range__); \
+ (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
+ (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
+
+/**
+ * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
+ * @notifier: a pointer to the current drm_gpusvm_notifier
+ *
+ * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
+ * the current notifier is the last one or if the input notifier is
+ * NULL.
+ */
+static inline struct drm_gpusvm_notifier *
+__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
+{
+ if (notifier && !list_is_last(&notifier->entry,
+ &notifier->gpusvm->notifier_list))
+ return list_next_entry(notifier, entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
+ */
+#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = __drm_gpusvm_notifier_next(notifier__))
+
+/**
+ * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
+ * @notifier__: Iterator variable for the notifiers
+ * @next__: Iterator variable for temporary storage of the next notifier
+ * @gpusvm__: Pointer to the GPU SVM structure
+ * @start__: Start address of the notifier
+ * @end__: End address of the notifier
+ *
+ * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
+ * removing notifiers from it.
+ */
+#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
+ for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)), \
+ (next__) = __drm_gpusvm_notifier_next(notifier__); \
+ (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
+ (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+
#endif /* __DRM_GPUSVM_H__ */
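A sketch of the new notifier iterator combined with the existing range iterator (the driver code is hypothetical): count every range overlapping [start, end) across all notifiers of a gpusvm:

	static unsigned long count_ranges(struct drm_gpusvm *gpusvm,
					  unsigned long start, unsigned long end)
	{
		struct drm_gpusvm_notifier *notifier;
		struct drm_gpusvm_range *range;
		unsigned long n = 0;

		drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end)
			drm_gpusvm_for_each_range(range, notifier, start, end)
				n++;

		return n;
	}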
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 2e7088264355..8890ded1d907 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -160,15 +160,6 @@ struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
-static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
-{
- va->va.addr = addr;
- va->va.range = range;
- va->gem.obj = obj;
- va->gem.offset = offset;
-}
-
/**
* drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
* invalidated
@@ -206,9 +197,19 @@ enum drm_gpuvm_flags {
DRM_GPUVM_RESV_PROTECTED = BIT(0),
/**
+ * @DRM_GPUVM_IMMEDIATE_MODE: use the locking scheme for GEMs designed
+ * for modifying the GPUVM during the fence signalling path
+ *
+ * When set, gpuva.lock is used to protect gpuva.list in all GEM
+ * objects associated with this GPUVM. Otherwise, the GEMs dma-resv is
+ * used.
+ */
+ DRM_GPUVM_IMMEDIATE_MODE = BIT(1),
+
+ /**
* @DRM_GPUVM_USERBITS: user defined bits
*/
- DRM_GPUVM_USERBITS = BIT(1),
+ DRM_GPUVM_USERBITS = BIT(2),
};
/**
@@ -379,6 +380,19 @@ drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
}
/**
+ * drm_gpuvm_immediate_mode() - indicates whether &DRM_GPUVM_IMMEDIATE_MODE is
+ * set
+ * @gpuvm: the &drm_gpuvm
+ *
+ * Returns: true if &DRM_GPUVM_IMMEDIATE_MODE is set, false otherwise.
+ */
+static inline bool
+drm_gpuvm_immediate_mode(struct drm_gpuvm *gpuvm)
+{
+ return gpuvm->flags & DRM_GPUVM_IMMEDIATE_MODE;
+}
+
+/**
* drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
* @gpuvm__: the &drm_gpuvm
*
@@ -751,9 +765,10 @@ drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
struct drm_gpuvm_bo *vm_bo;
- drm_gem_gpuva_assert_lock_held(obj);
- drm_gem_for_each_gpuvm_bo(vm_bo, obj)
+ drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
drm_gpuvm_bo_evict(vm_bo, evict);
+ }
}
void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
@@ -1058,10 +1073,23 @@ struct drm_gpuva_ops {
*/
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+/**
+ * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
+ */
+struct drm_gpuvm_map_req {
+ /**
+ * @map: the &struct drm_gpuva_op_map describing the requested mapping
+ */
+ struct drm_gpuva_op_map map;
+};
+
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuvm_map_req *req);
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req);
+
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
@@ -1079,8 +1107,10 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
struct drm_gpuva_op_map *op)
{
- drm_gpuva_init(va, op->va.addr, op->va.range,
- op->gem.obj, op->gem.offset);
+ va->va.addr = op->va.addr;
+ va->va.range = op->va.range;
+ va->gem.obj = op->gem.obj;
+ va->gem.offset = op->gem.offset;
}
/**
@@ -1205,16 +1235,14 @@ struct drm_gpuvm_ops {
};
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset);
+ const struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *obj, u64 offset);
+ struct drm_gpuvm_map_req *req);
int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
u64 req_addr, u64 req_range);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 57a869a6f6e8..3aba7b380c8d 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -288,10 +288,12 @@ void mipi_dsi_picture_parameter_set_multi(struct mipi_dsi_multi_context *ctx,
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
-int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi,
- const void *payload, size_t size);
void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size);
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size);
ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
size_t num_params, void *data, size_t size);
u32 drm_mipi_dsi_get_input_bus_fmt(enum mipi_dsi_pixel_format dsi_format);
@@ -332,10 +334,16 @@ int mipi_dsi_dcs_write_buffer_chatty(struct mipi_dsi_device *dsi,
const void *data, size_t len);
void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
const void *data, size_t len);
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len);
ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
const void *data, size_t len);
ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
size_t len);
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len);
int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
@@ -383,28 +391,23 @@ void mipi_dsi_dcs_set_tear_scanline_multi(struct mipi_dsi_multi_context *ctx,
void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
/**
- * mipi_dsi_generic_write_seq - transmit data using a generic write packet
- *
- * This macro will print errors for you and will RETURN FROM THE CALLING
- * FUNCTION (yes this is non-intuitive) upon error.
+ * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet
*
- * Because of the non-intuitive return behavior, THIS MACRO IS DEPRECATED.
- * Please replace calls of it with mipi_dsi_generic_write_seq_multi().
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
*
- * @dsi: DSI peripheral device
+ * @ctx: Context for multiple DSI transactions
* @seq: buffer containing the payload
*/
-#define mipi_dsi_generic_write_seq(dsi, seq...) \
- do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write_chatty(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
+#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \
+ do { \
+ static const u8 d[] = { seq }; \
+ mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
/**
- * mipi_dsi_generic_write_seq_multi - transmit data using a generic write packet
+ * mipi_dsi_generic_write_var_seq_multi - transmit non-constant data using a
+ * generic write packet
*
* This macro will print errors for you and error handling is optimized for
* callers that call this multiple times in a row.
@@ -412,9 +415,9 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
* @ctx: Context for multiple DSI transactions
* @seq: buffer containing the payload
*/
-#define mipi_dsi_generic_write_seq_multi(ctx, seq...) \
- do { \
- static const u8 d[] = { seq }; \
+#define mipi_dsi_generic_write_var_seq_multi(ctx, seq...) \
+ do { \
+ const u8 d[] = { seq }; \
mipi_dsi_generic_write_multi(ctx, d, ARRAY_SIZE(d)); \
} while (0)
@@ -435,6 +438,110 @@ void mipi_dsi_dcs_set_tear_off_multi(struct mipi_dsi_multi_context *ctx);
} while (0)
/**
+ * mipi_dsi_dcs_write_var_seq_multi - transmit a DCS command with non-constant
+ * payload
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: Command
+ * @seq: buffer containing data to be transmitted
+ */
+#define mipi_dsi_dcs_write_var_seq_multi(ctx, cmd, seq...) \
+ do { \
+ const u8 d[] = { cmd, seq }; \
+ mipi_dsi_dcs_write_buffer_multi(ctx, d, ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dual - send the same MIPI DSI command to two interfaces
+ *
+ * This macro will send the specified MIPI DSI command twice, once to each of
+ * the two interfaces supplied. This is useful for reducing duplication of code
+ * in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that the _func parameter cannot accept a macro such as
+ * mipi_dsi_generic_write_multi() or mipi_dsi_dcs_write_buffer_multi(). See
+ * mipi_dsi_dual_generic_write_multi() and
+ * mipi_dsi_dual_dcs_write_buffer_multi() instead.
+ *
+ * WARNING: This macro expands the _func argument and the optional trailing
+ * arguments twice each, which may cause unintended side effects. For example,
+ * applying the postfix increment ++ operator to one of the arguments passed
+ * to _func will increment the variable twice instead of once, and it will
+ * already hold its original value + 1 when sent to _dsi2.
+ *
+ * @_func: MIPI DSI function to pass context and arguments into
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of the MIPI DSI command
+ * @_dsi2: Second DSI interface to act as recipient of the MIPI DSI command
+ * @...: Arguments to pass to MIPI DSI function or macro
+ */
+#define mipi_dsi_dual(_func, _ctx, _dsi1, _dsi2, ...) \
+ do { \
+ struct mipi_dsi_multi_context *_ctxcpy = (_ctx); \
+ _ctxcpy->dsi = (_dsi1); \
+ (_func)(_ctxcpy, ##__VA_ARGS__); \
+ _ctxcpy->dsi = (_dsi2); \
+ (_func)(_ctxcpy, ##__VA_ARGS__); \
+ } while (0)
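A hedged usage sketch with one of the existing *_multi helpers (the
panel->dsi[] fields are illustrative); the *_multi helpers become no-ops once
ctx.accum_err is set, so an error on the first link skips the second:

	struct mipi_dsi_multi_context ctx = { };

	mipi_dsi_dual(mipi_dsi_dcs_exit_sleep_mode_multi, &ctx,
		      panel->dsi[0], panel->dsi[1]);
	if (ctx.accum_err)
		return ctx.accum_err;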
+
+/**
+ * mipi_dsi_dual_generic_write_seq_multi - transmit data using a generic write
+ * packet to two dsi interfaces, one after the other
+ *
+ * This macro will send the specified generic packet twice, once to each of
+ * the two interfaces supplied. This is useful for reducing duplication of code
+ * in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that if an error occurs while transmitting the packet to the first DSI
+ * interface, the packet will not be sent to the second DSI interface.
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of packet
+ * @_dsi2: Second DSI interface to act as recipient of packet
+ * @_seq: buffer containing the payload
+ */
+#define mipi_dsi_dual_generic_write_seq_multi(_ctx, _dsi1, _dsi2, _seq...) \
+ do { \
+ static const u8 d[] = { _seq }; \
+ mipi_dsi_dual_generic_write_multi(_ctx, _dsi1, _dsi2, d, \
+ ARRAY_SIZE(d)); \
+ } while (0)
+
+/**
+ * mipi_dsi_dual_dcs_write_seq_multi - transmit a DCS command with payload to
+ * two dsi interfaces, one after the other
+ *
+ * This macro will send the specified DCS command with payload twice, once to
+ * each of the two interfaces supplied. This is useful for reducing duplication
+ * of code in panel drivers which use two parallel serial interfaces.
+ *
+ * Note that if an error occurs while transmitting the payload to the first DSI
+ * interface, the payload will not be sent to the second DSI interface.
+ *
+ * This macro will print errors for you and error handling is optimized for
+ * callers that call this multiple times in a row.
+ *
+ * @_ctx: Context for multiple DSI transactions
+ * @_dsi1: First DSI interface to act as recipient of packet
+ * @_dsi2: Second DSI interface to act as recipient of packet
+ * @_cmd: Command
+ * @_seq: buffer containing the payload
+ */
+#define mipi_dsi_dual_dcs_write_seq_multi(_ctx, _dsi1, _dsi2, _cmd, _seq...) \
+ do { \
+ static const u8 d[] = { _cmd, _seq }; \
+ mipi_dsi_dual_dcs_write_buffer_multi(_ctx, _dsi1, _dsi2, d, \
+ ARRAY_SIZE(d)); \
+ } while (0)
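A hedged example of the dual sequence variants (command and payload bytes are
illustrative):

	mipi_dsi_dual_dcs_write_seq_multi(&ctx, panel->dsi[0], panel->dsi[1],
					  0xb0, 0x11, 0x22);
	mipi_dsi_dual_generic_write_seq_multi(&ctx, panel->dsi[0], panel->dsi[1],
					      0xde, 0xad);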
+
+/**
* struct mipi_dsi_driver - DSI driver
* @driver: device driver model driver
* @probe: callback for device binding
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index e5f20a1235be..f6e7e234c089 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -6,6 +6,8 @@
#include <linux/hmm.h>
#include <linux/types.h>
+#define NR_PAGES(order) (1U << (order))
+
struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;
@@ -23,7 +25,7 @@ enum drm_interconnect_protocol {
};
/**
- * struct drm_pagemap_device_addr - Device address representation.
+ * struct drm_pagemap_addr - Address representation.
* @addr: The dma address or driver-defined address for driver private interconnects.
* @proto: The interconnect protocol.
* @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
@@ -32,7 +34,7 @@ enum drm_interconnect_protocol {
* Note: There is room for improvement here. We should be able to pack into
* 64 bits.
*/
-struct drm_pagemap_device_addr {
+struct drm_pagemap_addr {
dma_addr_t addr;
u64 proto : 54;
u64 order : 8;
@@ -40,21 +42,21 @@ struct drm_pagemap_device_addr {
};
/**
- * drm_pagemap_device_addr_encode() - Encode a dma address with metadata
+ * drm_pagemap_addr_encode() - Encode a dma address with metadata
* @addr: The dma address or driver-defined address for driver private interconnects.
* @proto: The interconnect protocol.
* @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
* @dir: The DMA direction.
*
- * Return: A struct drm_pagemap_device_addr encoding the above information.
+ * Return: A struct drm_pagemap_addr encoding the above information.
*/
-static inline struct drm_pagemap_device_addr
-drm_pagemap_device_addr_encode(dma_addr_t addr,
- enum drm_interconnect_protocol proto,
- unsigned int order,
- enum dma_data_direction dir)
+static inline struct drm_pagemap_addr
+drm_pagemap_addr_encode(dma_addr_t addr,
+ enum drm_interconnect_protocol proto,
+ unsigned int order,
+ enum dma_data_direction dir)
{
- return (struct drm_pagemap_device_addr) {
+ return (struct drm_pagemap_addr) {
.addr = addr,
.proto = proto,
.order = order,
@@ -75,11 +77,11 @@ struct drm_pagemap_ops {
* @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
* @dir: The transfer direction.
*/
- struct drm_pagemap_device_addr (*device_map)(struct drm_pagemap *dpagemap,
- struct device *dev,
- struct page *page,
- unsigned int order,
- enum dma_data_direction dir);
+ struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir);
/**
* @device_unmap: Unmap a device address previously obtained using @device_map.
@@ -90,7 +92,7 @@ struct drm_pagemap_ops {
*/
void (*device_unmap)(struct drm_pagemap *dpagemap,
struct device *dev,
- struct drm_pagemap_device_addr addr);
+ struct drm_pagemap_addr addr);
/**
* @populate_mm: Populate part of the mm with @dpagemap memory,
@@ -170,29 +172,33 @@ struct drm_pagemap_devmem_ops {
/**
* @copy_to_devmem: Copy to device memory (required for migration)
* @pages: Pointer to array of device memory pages (destination)
- * @dma_addr: Pointer to array of DMA addresses (source)
+ * @pagemap_addr: Pointer to array of DMA information (source)
* @npages: Number of pages to copy
*
- * Copy pages to device memory.
+ * Copy pages to device memory. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
*
* Return: 0 on success, a negative error code on failure.
*/
int (*copy_to_devmem)(struct page **pages,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages);
/**
* @copy_to_ram: Copy to system RAM (required for migration)
* @pages: Pointer to array of device memory pages (source)
- * @dma_addr: Pointer to array of DMA addresses (destination)
+ * @pagemap_addr: Pointer to array of DMA information (destination)
* @npages: Number of pages to copy
*
- * Copy pages to system RAM.
+ * Copy pages to system RAM. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
*
* Return: 0 on success, a negative error code on failure.
*/
int (*copy_to_ram)(struct page **pages,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages);
};
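An implementation can honor higher-order entries with a stride loop; a hedged
sketch in which copy_chunk() is a hypothetical per-entry transfer helper:

	unsigned long i = 0;

	while (i < npages) {
		struct drm_pagemap_addr addr = pagemap_addr[i];

		/* One transfer covers PAGE_SIZE << order bytes. */
		copy_chunk(pages[i], addr.addr, PAGE_SIZE << addr.order);
		i += NR_PAGES(addr.order);
	}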
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 843fb756a295..2407bfa60236 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -160,6 +160,20 @@ struct drm_panel_follower_funcs {
* Called before the panel is powered off.
*/
int (*panel_unpreparing)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_enabled:
+ *
+ * Called after the panel and the backlight have been enabled.
+ */
+ int (*panel_enabled)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_disabling:
+ *
+ * Called before the panel and the backlight are disabled.
+ */
+ int (*panel_disabling)(struct drm_panel_follower *follower);
};
struct drm_panel_follower {
diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
index 15fa9b6865f4..6a46f755daba 100644
--- a/include/drm/drm_utils.h
+++ b/include/drm/drm_utils.h
@@ -16,7 +16,13 @@ struct drm_edid;
int drm_get_panel_orientation_quirk(int width, int height);
-int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid);
+struct drm_panel_backlight_quirk {
+ u16 min_brightness;
+ u32 brightness_mask;
+};
+
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid);
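A hedged caller-side sketch (edid and min_brightness are illustrative):

	const struct drm_panel_backlight_quirk *quirk =
		drm_get_panel_backlight_quirk(edid);

	if (quirk)
		min_brightness = quirk->min_brightness;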
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);
diff --git a/include/drm/intel/i915_component.h b/include/drm/intel/i915_component.h
index 4ea3b17aa143..8082db222e00 100644
--- a/include/drm/intel/i915_component.h
+++ b/include/drm/intel/i915_component.h
@@ -31,6 +31,7 @@ enum i915_component_type {
I915_COMPONENT_HDCP,
I915_COMPONENT_PXP,
I915_COMPONENT_GSC_PROXY,
+ INTEL_COMPONENT_LB,
};
/* MAX_PORT is the number of port
diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h
new file mode 100644
index 000000000000..d65be2cba2ab
--- /dev/null
+++ b/include/drm/intel/intel_lb_mei_interface.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Intel Corporation
+ */
+
+#ifndef _INTEL_LB_MEI_INTERFACE_H_
+#define _INTEL_LB_MEI_INTERFACE_H_
+
+#include <linux/types.h>
+
+struct device;
+
+/**
+ * define INTEL_LB_FLAG_IS_PERSISTENT - Mark the payload as persistent
+ *
+ * This flag indicates that the late binding payload should be stored
+ * persistently in flash across warm resets.
+ */
+#define INTEL_LB_FLAG_IS_PERSISTENT BIT(0)
+
+/**
+ * enum intel_lb_type - enum to determine late binding payload type
+ * @INTEL_LB_TYPE_FAN_CONTROL: Fan controller configuration
+ */
+enum intel_lb_type {
+ INTEL_LB_TYPE_FAN_CONTROL = 1,
+};
+
+/**
+ * enum intel_lb_status - Status codes returned on late binding transmissions
+ * @INTEL_LB_STATUS_SUCCESS: Operation completed successfully
+ * @INTEL_LB_STATUS_4ID_MISMATCH: Mismatch in the expected 4ID (firmware identity/token)
+ * @INTEL_LB_STATUS_ARB_FAILURE: Arbitration failure (e.g. conflicting access or state)
+ * @INTEL_LB_STATUS_GENERAL_ERROR: General firmware error not covered by other codes
+ * @INTEL_LB_STATUS_INVALID_PARAMS: One or more input parameters are invalid
+ * @INTEL_LB_STATUS_INVALID_SIGNATURE: Payload has an invalid or untrusted signature
+ * @INTEL_LB_STATUS_INVALID_PAYLOAD: Payload contents are not accepted by firmware
+ * @INTEL_LB_STATUS_TIMEOUT: Operation timed out before completion
+ */
+enum intel_lb_status {
+ INTEL_LB_STATUS_SUCCESS = 0,
+ INTEL_LB_STATUS_4ID_MISMATCH = 1,
+ INTEL_LB_STATUS_ARB_FAILURE = 2,
+ INTEL_LB_STATUS_GENERAL_ERROR = 3,
+ INTEL_LB_STATUS_INVALID_PARAMS = 4,
+ INTEL_LB_STATUS_INVALID_SIGNATURE = 5,
+ INTEL_LB_STATUS_INVALID_PAYLOAD = 6,
+ INTEL_LB_STATUS_TIMEOUT = 7,
+};
+
+/**
+ * struct intel_lb_component_ops - Ops for late binding services
+ */
+struct intel_lb_component_ops {
+ /**
+	 * @push_payload: Sends a payload to the authentication firmware
+ * @dev: Device struct corresponding to the mei device
+ * @type: Payload type (see &enum intel_lb_type)
+	 * @flags: Payload flags bitmap (e.g. %INTEL_LB_FLAG_IS_PERSISTENT)
+ * @payload: Pointer to payload buffer
+ * @payload_size: Payload buffer size in bytes
+ *
+	 * Return: 0 on success, a negative errno value on transport failure,
+	 * or a positive status code returned by firmware
+ */
+ int (*push_payload)(struct device *dev, u32 type, u32 flags,
+ const void *payload, size_t payload_size);
+};
+
+#endif /* _INTEL_LB_MEI_INTERFACE_H_ */
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 76f8d26f9cc9..da6301a6fcea 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -26,6 +26,11 @@
#define __PCIIDS_H__
#ifdef __KERNEL__
+#define INTEL_PCI_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+
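Unlike INTEL_VGA_DEVICE below, this variant applies no display-class filter. A
hedged table sketch (the device ID and info pointer are illustrative):

	static const struct pci_device_id example_ids[] = {
		INTEL_PCI_DEVICE(0x1234, &example_info),
		{ }
	};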
#define INTEL_VGA_DEVICE(_id, _info) { \
PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
.class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 479b7ed075c0..e664a96540eb 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -466,6 +466,8 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx);
/* Driver LRU walk helpers initially targeted for shrinking. */
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index e52bba15012f..f49daa504c36 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -36,7 +36,7 @@
#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
-#define TTM_NUM_MEM_TYPES 8
+#define TTM_NUM_MEM_TYPES 9
struct dmem_cgroup_device;
struct ttm_device;
diff --git a/include/dt-bindings/clock/axis,artpec8-clk.h b/include/dt-bindings/clock/axis,artpec8-clk.h
new file mode 100644
index 000000000000..1e6e1409dd94
--- /dev/null
+++ b/include/dt-bindings/clock/axis,artpec8-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Device Tree binding constants for ARTPEC-8 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_ARTPEC8_H
+#define _DT_BINDINGS_CLOCK_ARTPEC8_H
+
+/* CMU_CMU */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_DOUT_SHARED0_DIV2 2
+#define CLK_DOUT_SHARED0_DIV3 3
+#define CLK_DOUT_SHARED0_DIV4 4
+#define CLK_FOUT_SHARED1_PLL 5
+#define CLK_DOUT_SHARED1_DIV2 6
+#define CLK_DOUT_SHARED1_DIV3 7
+#define CLK_DOUT_SHARED1_DIV4 8
+#define CLK_FOUT_AUDIO_PLL 9
+#define CLK_DOUT_CMU_BUS 10
+#define CLK_DOUT_CMU_BUS_DLP 11
+#define CLK_DOUT_CMU_CDC_CORE 12
+#define CLK_DOUT_CMU_OTP 13
+#define CLK_DOUT_CMU_CORE_MAIN 14
+#define CLK_DOUT_CMU_CORE_DLP 15
+#define CLK_DOUT_CMU_CPUCL_SWITCH 16
+#define CLK_DOUT_CMU_DLP_CORE 17
+#define CLK_DOUT_CMU_FSYS_BUS 18
+#define CLK_DOUT_CMU_FSYS_IP 19
+#define CLK_DOUT_CMU_FSYS_SCAN0 20
+#define CLK_DOUT_CMU_FSYS_SCAN1 21
+#define CLK_DOUT_CMU_GPU_3D 22
+#define CLK_DOUT_CMU_GPU_2D 23
+#define CLK_DOUT_CMU_IMEM_ACLK 24
+#define CLK_DOUT_CMU_IMEM_JPEG 25
+#define CLK_DOUT_CMU_MIF_SWITCH 26
+#define CLK_DOUT_CMU_MIF_BUSP 27
+#define CLK_DOUT_CMU_PERI_DISP 28
+#define CLK_DOUT_CMU_PERI_IP 29
+#define CLK_DOUT_CMU_PERI_AUDIO 30
+#define CLK_DOUT_CMU_RSP_CORE 31
+#define CLK_DOUT_CMU_TRFM_CORE 32
+#define CLK_DOUT_CMU_VCA_ACE 33
+#define CLK_DOUT_CMU_VCA_OD 34
+#define CLK_DOUT_CMU_VIO_CORE 35
+#define CLK_DOUT_CMU_VIO_AUDIO 36
+#define CLK_DOUT_CMU_VIP0_CORE 37
+#define CLK_DOUT_CMU_VIP1_CORE 38
+#define CLK_DOUT_CMU_VPP_CORE 39
+
+/* CMU_BUS */
+#define CLK_MOUT_BUS_ACLK_USER 1
+#define CLK_MOUT_BUS_DLP_USER 2
+#define CLK_DOUT_BUS_PCLK 3
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_ACLK_USER 1
+#define CLK_MOUT_CORE_DLP_USER 2
+#define CLK_DOUT_CORE_PCLK 3
+
+/* CMU_CPUCL */
+#define CLK_FOUT_CPUCL_PLL 1
+#define CLK_MOUT_CPUCL_PLL 2
+#define CLK_MOUT_CPUCL_SWITCH_USER 3
+#define CLK_DOUT_CPUCL_CPU 4
+#define CLK_DOUT_CPUCL_CLUSTER_ACLK 5
+#define CLK_DOUT_CPUCL_CLUSTER_PCLKDBG 6
+#define CLK_DOUT_CPUCL_CLUSTER_CNTCLK 7
+#define CLK_DOUT_CPUCL_CLUSTER_ATCLK 8
+#define CLK_DOUT_CPUCL_PCLK 9
+#define CLK_DOUT_CPUCL_CMUREF 10
+#define CLK_DOUT_CPUCL_DBG 11
+#define CLK_DOUT_CPUCL_PCLKDBG 12
+#define CLK_GOUT_CPUCL_CLUSTER_CPU 13
+#define CLK_GOUT_CPUCL_SHORTSTOP 14
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG 15
+#define CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK 16
+
+/* CMU_FSYS */
+#define CLK_FOUT_FSYS_PLL 1
+#define CLK_MOUT_FSYS_SCAN0_USER 2
+#define CLK_MOUT_FSYS_SCAN1_USER 3
+#define CLK_MOUT_FSYS_BUS_USER 4
+#define CLK_MOUT_FSYS_MMC_USER 5
+#define CLK_DOUT_FSYS_PCIE_PIPE 6
+#define CLK_DOUT_FSYS_ADC 7
+#define CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL 8
+#define CLK_DOUT_FSYS_EQOS_INT125 9
+#define CLK_DOUT_FSYS_OTP_MEM 10
+#define CLK_DOUT_FSYS_SCLK_UART 11
+#define CLK_DOUT_FSYS_EQOS_25 12
+#define CLK_DOUT_FSYS_EQOS_2p5 13
+#define CLK_DOUT_FSYS_BUS300 14
+#define CLK_DOUT_FSYS_BUS_QSPI 15
+#define CLK_DOUT_FSYS_MMC_CARD0 16
+#define CLK_DOUT_FSYS_MMC_CARD1 17
+#define CLK_DOUT_SCAN_CLK_FSYS_125 18
+#define CLK_DOUT_FSYS_QSPI 19
+#define CLK_DOUT_FSYS_SFMC_NAND 20
+#define CLK_DOUT_FSYS_SCAN_CLK_MMC 21
+#define CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 22
+#define CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 23
+#define CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK 24
+#define CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK 25
+#define CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK 26
+#define CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK 27
+#define CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0 28
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 29
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INXT_0_SLV_ACLK_UG 30
+#define CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 31
+#define CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK 32
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I 33
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I 34
+#define CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5 35
+#define CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND 36
+#define CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN 37
+#define CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK 38
+#define CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN 39
+#define CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK 40
+#define CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN 41
+#define CLK_GOUT_FSYS_UART0_PCLK 42
+#define CLK_GOUT_FSYS_UART0_SCLK_UART 43
+#define CLK_GOUT_FSYS_BUS_QSPI 44
+#define CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK 45
+#define CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK 46
+
+/* CMU_IMEM */
+#define CLK_MOUT_IMEM_ACLK_USER 1
+#define CLK_MOUT_IMEM_GIC_CA53 2
+#define CLK_MOUT_IMEM_GIC_CA5 3
+#define CLK_MOUT_IMEM_JPEG_USER 4
+#define CLK_GOUT_IMEM_MCT_PCLK 5
+#define CLK_GOUT_IMEM_PCLK_TMU0_APBIF 6
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_IP_USER 1
+#define CLK_MOUT_PERI_AUDIO_USER 2
+#define CLK_MOUT_PERI_I2S0 3
+#define CLK_MOUT_PERI_I2S1 4
+#define CLK_MOUT_PERI_DISP_USER 5
+#define CLK_DOUT_PERI_SPI 6
+#define CLK_DOUT_PERI_UART1 7
+#define CLK_DOUT_PERI_UART2 8
+#define CLK_DOUT_PERI_PCLK 9
+#define CLK_DOUT_PERI_I2S0 10
+#define CLK_DOUT_PERI_I2S1 11
+#define CLK_DOUT_PERI_DSIM 12
+#define CLK_GOUT_PERI_UART1_PCLK 13
+#define CLK_GOUT_PERI_UART1_SCLK_UART 14
+#define CLK_GOUT_PERI_UART2_PCLK 15
+#define CLK_GOUT_PERI_UART2_SCLK_UART 16
+#define CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK 17
+#define CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK 18
+#define CLK_GOUT_PERI_SPI0_PCLK 19
+#define CLK_GOUT_PERI_SPI0_SCLK_SPI 20
+#define CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 21
+#define CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST 22
+#define CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST 23
+#define CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK 24
+#define CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK 25
+#define CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK 26
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK 27
+#define CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK 28
+
+#endif /* _DT_BINDINGS_CLOCK_ARTPEC8_H */
diff --git a/include/dt-bindings/clock/qcom,apss-ipq.h b/include/dt-bindings/clock/qcom,apss-ipq.h
index 77b6e05492e2..0bb41e5efdef 100644
--- a/include/dt-bindings/clock/qcom,apss-ipq.h
+++ b/include/dt-bindings/clock/qcom,apss-ipq.h
@@ -8,5 +8,11 @@
#define APCS_ALIAS0_CLK_SRC 0
#define APCS_ALIAS0_CORE_CLK 1
+#define APSS_PLL_EARLY 2
+#define APSS_SILVER_CLK_SRC 3
+#define APSS_SILVER_CORE_CLK 4
+#define L3_PLL 5
+#define L3_CLK_SRC 6
+#define L3_CORE_CLK 7
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7280.h b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
index a4a692c20acf..9f113f346be8 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sc7280.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7280.h
@@ -52,4 +52,8 @@
/* DISP_CC power domains */
#define DISP_CC_MDSS_CORE_GDSC 0
+/* DISPCC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
#endif
diff --git a/include/dt-bindings/clock/raspberrypi,rp1-clocks.h b/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
index 248efb895f35..7915fb8197bf 100644
--- a/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
+++ b/include/dt-bindings/clock/raspberrypi,rp1-clocks.h
@@ -58,4 +58,8 @@
#define RP1_PLL_VIDEO_PRI_PH 43
#define RP1_PLL_AUDIO_TERN 44
+/* MIPI clocks managed by the DSI driver */
+#define RP1_CLK_MIPI0_DSI_BYTECLOCK 45
+#define RP1_CLK_MIPI1_DSI_BYTECLOCK 46
+
#endif
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
index 7ecc4f0b235a..0c2ce81a8744 100644
--- a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -25,5 +25,6 @@
#define R9A09G077_CLK_PCLKM 13
#define R9A09G077_CLK_PCLKL 14
#define R9A09G077_SDHI_CLKHS 15
+#define R9A09G077_USB_CLK 16
#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
index 925e57703925..70ee883f2386 100644
--- a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -25,5 +25,6 @@
#define R9A09G087_CLK_PCLKM 13
#define R9A09G087_CLK_PCLKL 14
#define R9A09G087_SDHI_CLKHS 15
+#define R9A09G087_USB_CLK 16
#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/clock/sun55i-a523-ccu.h b/include/dt-bindings/clock/sun55i-a523-ccu.h
index c8259ac5ada7..54808fcfd556 100644
--- a/include/dt-bindings/clock/sun55i-a523-ccu.h
+++ b/include/dt-bindings/clock/sun55i-a523-ccu.h
@@ -185,5 +185,6 @@
#define CLK_FANOUT0 176
#define CLK_FANOUT1 177
#define CLK_FANOUT2 178
+#define CLK_NPU 179
#endif /* _DT_BINDINGS_CLK_SUN55I_A523_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h b/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h
new file mode 100644
index 000000000000..6efc6bc7e11a
--- /dev/null
+++ b/include/dt-bindings/clock/sun55i-a523-mcu-ccu.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_
+#define _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_
+
+#define CLK_MCU_PLL_AUDIO1 0
+#define CLK_MCU_PLL_AUDIO1_DIV2 1
+#define CLK_MCU_PLL_AUDIO1_DIV5 2
+#define CLK_MCU_AUDIO_OUT 3
+#define CLK_MCU_DSP 4
+#define CLK_MCU_I2S0 5
+#define CLK_MCU_I2S1 6
+#define CLK_MCU_I2S2 7
+#define CLK_MCU_I2S3 8
+#define CLK_MCU_I2S3_ASRC 9
+#define CLK_BUS_MCU_I2S0 10
+#define CLK_BUS_MCU_I2S1 11
+#define CLK_BUS_MCU_I2S2 12
+#define CLK_BUS_MCU_I2S3 13
+#define CLK_MCU_SPDIF_TX 14
+#define CLK_MCU_SPDIF_RX 15
+#define CLK_BUS_MCU_SPDIF 16
+#define CLK_MCU_DMIC 17
+#define CLK_BUS_MCU_DMIC 18
+#define CLK_MCU_AUDIO_CODEC_DAC 19
+#define CLK_MCU_AUDIO_CODEC_ADC 20
+#define CLK_BUS_MCU_AUDIO_CODEC 21
+#define CLK_BUS_MCU_DSP_MSGBOX 22
+#define CLK_BUS_MCU_DSP_CFG 23
+#define CLK_BUS_MCU_NPU_HCLK 24
+#define CLK_BUS_MCU_NPU_ACLK 25
+#define CLK_MCU_TIMER0 26
+#define CLK_MCU_TIMER1 27
+#define CLK_MCU_TIMER2 28
+#define CLK_MCU_TIMER3 29
+#define CLK_MCU_TIMER4 30
+#define CLK_MCU_TIMER5 31
+#define CLK_BUS_MCU_TIMER 32
+#define CLK_BUS_MCU_DMA 33
+#define CLK_MCU_TZMA0 34
+#define CLK_MCU_TZMA1 35
+#define CLK_BUS_MCU_PUBSRAM 36
+#define CLK_MCU_MBUS_DMA 37
+#define CLK_MCU_MBUS 38
+#define CLK_MCU_RISCV 39
+#define CLK_BUS_MCU_RISCV_CFG 40
+#define CLK_BUS_MCU_RISCV_MSGBOX 41
+#define CLK_MCU_PWM0 42
+#define CLK_BUS_MCU_PWM0 43
+
+#endif /* _DT_BINDINGS_CLK_SUN55I_A523_MCU_CCU_H_ */
diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h
index f193663e6f28..763b81f80908 100644
--- a/include/dt-bindings/clock/tegra30-car.h
+++ b/include/dt-bindings/clock/tegra30-car.h
@@ -271,6 +271,7 @@
#define TEGRA30_CLK_AUDIO3_MUX 306
#define TEGRA30_CLK_AUDIO4_MUX 307
#define TEGRA30_CLK_SPDIF_MUX 308
-#define TEGRA30_CLK_CLK_MAX 309
+#define TEGRA30_CLK_CSIA_PAD 309
+#define TEGRA30_CLK_CSIB_PAD 310
#endif /* _DT_BINDINGS_CLOCK_TEGRA30_CAR_H */
diff --git a/include/dt-bindings/interconnect/qcom,ipq5424.h b/include/dt-bindings/interconnect/qcom,ipq5424.h
index a770356112ee..afd7e0683a24 100644
--- a/include/dt-bindings/interconnect/qcom,ipq5424.h
+++ b/include/dt-bindings/interconnect/qcom,ipq5424.h
@@ -21,4 +21,7 @@
#define MASTER_CNOC_USB 16
#define SLAVE_CNOC_USB 17
+#define MASTER_CPU 0
+#define SLAVE_L3 1
+
#endif /* INTERCONNECT_QCOM_IPQ5424_H */
diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h
index dda00c038530..ba34c420c303 100644
--- a/include/dt-bindings/media/tvp5150.h
+++ b/include/dt-bindings/media/tvp5150.h
@@ -2,7 +2,7 @@
/*
tvp5150.h - definition for tvp5150 inputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
index 5e082547f179..881bf78aa8b2 100644
--- a/include/dt-bindings/memory/tegra210-mc.h
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -75,4 +75,78 @@
#define TEGRA210_MC_RESET_ETR 28
#define TEGRA210_MC_RESET_TSECB 29
+#define TEGRA210_MC_PTCR 0
+#define TEGRA210_MC_DISPLAY0A 1
+#define TEGRA210_MC_DISPLAY0AB 2
+#define TEGRA210_MC_DISPLAY0B 3
+#define TEGRA210_MC_DISPLAY0BB 4
+#define TEGRA210_MC_DISPLAY0C 5
+#define TEGRA210_MC_DISPLAY0CB 6
+#define TEGRA210_MC_AFIR 14
+#define TEGRA210_MC_AVPCARM7R 15
+#define TEGRA210_MC_DISPLAYHC 16
+#define TEGRA210_MC_DISPLAYHCB 17
+#define TEGRA210_MC_HDAR 21
+#define TEGRA210_MC_HOST1XDMAR 22
+#define TEGRA210_MC_HOST1XR 23
+#define TEGRA210_MC_NVENCSRD 28
+#define TEGRA210_MC_PPCSAHBDMAR 29
+#define TEGRA210_MC_PPCSAHBSLVR 30
+#define TEGRA210_MC_SATAR 31
+#define TEGRA210_MC_MPCORER 39
+#define TEGRA210_MC_NVENCSWR 43
+#define TEGRA210_MC_AFIW 49
+#define TEGRA210_MC_AVPCARM7W 50
+#define TEGRA210_MC_HDAW 53
+#define TEGRA210_MC_HOST1XW 54
+#define TEGRA210_MC_MPCOREW 57
+#define TEGRA210_MC_PPCSAHBDMAW 59
+#define TEGRA210_MC_PPCSAHBSLVW 60
+#define TEGRA210_MC_SATAW 61
+#define TEGRA210_MC_ISPRA 68
+#define TEGRA210_MC_ISPWA 70
+#define TEGRA210_MC_ISPWB 71
+#define TEGRA210_MC_XUSB_HOSTR 74
+#define TEGRA210_MC_XUSB_HOSTW 75
+#define TEGRA210_MC_XUSB_DEVR 76
+#define TEGRA210_MC_XUSB_DEVW 77
+#define TEGRA210_MC_ISPRAB 78
+#define TEGRA210_MC_ISPWAB 80
+#define TEGRA210_MC_ISPWBB 81
+#define TEGRA210_MC_TSECSRD 84
+#define TEGRA210_MC_TSECSWR 85
+#define TEGRA210_MC_A9AVPSCR 86
+#define TEGRA210_MC_A9AVPSCW 87
+#define TEGRA210_MC_GPUSRD 88
+#define TEGRA210_MC_GPUSWR 89
+#define TEGRA210_MC_DISPLAYT 90
+#define TEGRA210_MC_SDMMCRA 96
+#define TEGRA210_MC_SDMMCRAA 97
+#define TEGRA210_MC_SDMMCR 98
+#define TEGRA210_MC_SDMMCRAB 99
+#define TEGRA210_MC_SDMMCWA 100
+#define TEGRA210_MC_SDMMCWAA 101
+#define TEGRA210_MC_SDMMCW 102
+#define TEGRA210_MC_SDMMCWAB 103
+#define TEGRA210_MC_VICSRD 108
+#define TEGRA210_MC_VICSWR 109
+#define TEGRA210_MC_VIW 114
+#define TEGRA210_MC_DISPLAYD 115
+#define TEGRA210_MC_NVDECSRD 120
+#define TEGRA210_MC_NVDECSWR 121
+#define TEGRA210_MC_APER 122
+#define TEGRA210_MC_APEW 123
+#define TEGRA210_MC_NVJPGRD 126
+#define TEGRA210_MC_NVJPGWR 127
+#define TEGRA210_MC_SESRD 128
+#define TEGRA210_MC_SESWR 129
+#define TEGRA210_MC_AXIAPR 130
+#define TEGRA210_MC_AXIAPW 131
+#define TEGRA210_MC_ETRR 132
+#define TEGRA210_MC_ETRW 133
+#define TEGRA210_MC_TSECSRDB 134
+#define TEGRA210_MC_TSECSWRB 135
+#define TEGRA210_MC_GPUSRD2 136
+#define TEGRA210_MC_GPUSWR2 137
+
#endif
diff --git a/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h b/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h
new file mode 100644
index 000000000000..43a2b5743a63
--- /dev/null
+++ b/include/dt-bindings/net/renesas,r9a09g077-pcs-miic.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef _DT_BINDINGS_RENESAS_R9A09G077_PCS_MIIC_H
+#define _DT_BINDINGS_RENESAS_R9A09G077_PCS_MIIC_H
+
+/*
+ * Media Interface Connection Matrix
+ * ===========================================================
+ *
+ * Selects the function of the Media interface of the MAC to be used
+ *
+ * SW_MODE[2:0] | Port 0 | Port 1 | Port 2 | Port 3
+ * -------------|-------------|-------------|-------------|-------------
+ * 000b | ETHSW Port0 | ETHSW Port1 | ETHSW Port2 | GMAC1
+ * 001b | ESC Port0 | ESC Port1 | GMAC2 | GMAC1
+ * 010b | ESC Port0 | ESC Port1 | ETHSW Port2 | GMAC1
+ * 011b | ESC Port0 | ESC Port1 | ESC Port2 | GMAC1
+ * 100b | ETHSW Port0 | ESC Port1 | ESC Port2 | GMAC1
+ * 101b | ETHSW Port0 | ESC Port1 | ETHSW Port2 | GMAC1
+ * 110b | ETHSW Port0 | ETHSW Port1 | GMAC2 | GMAC1
+ * 111b | GMAC0 | GMAC1 | GMAC2 | -
+ */
+#define ETHSS_GMAC0_PORT 0
+#define ETHSS_GMAC1_PORT 1
+#define ETHSS_GMAC2_PORT 2
+#define ETHSS_ESC_PORT0 3
+#define ETHSS_ESC_PORT1 4
+#define ETHSS_ESC_PORT2 5
+#define ETHSS_ETHSW_PORT0 6
+#define ETHSS_ETHSW_PORT1 7
+#define ETHSS_ETHSW_PORT2 8
+
+#endif
diff --git a/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h b/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h
new file mode 100644
index 000000000000..f088793f23ee
--- /dev/null
+++ b/include/dt-bindings/pinctrl/renesas,r9a09g077-pinctrl.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for Renesas RZ/T2H family pinctrl bindings.
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__
+#define __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__
+
+#define RZT2H_PINS_PER_PORT 8
+
+/*
+ * Create the pin index from its bank and position numbers and store in
+ * the upper 16 bits the alternate function identifier
+ */
+#define RZT2H_PORT_PINMUX(b, p, f)	(((b) * RZT2H_PINS_PER_PORT + (p)) | ((f) << 16))
+
+/* Convert a port and pin label to its global pin index */
+#define RZT2H_GPIO(port, pin) ((port) * RZT2H_PINS_PER_PORT + (pin))
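Worked example (values illustrative): RZT2H_PORT_PINMUX(2, 3, 5) evaluates to
(2 * 8 + 3) | (5 << 16) = 0x13 | 0x50000 = 0x50013, while RZT2H_GPIO(2, 3)
yields the plain global pin index 19.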
+
+#endif /* __DT_BINDINGS_PINCTRL_RENESAS_R9A09G077_PINCTRL_H__ */
diff --git a/include/dt-bindings/reset/nvidia,tegra114-car.h b/include/dt-bindings/reset/nvidia,tegra114-car.h
new file mode 100644
index 000000000000..9b8c320402db
--- /dev/null
+++ b/include/dt-bindings/reset/nvidia,tegra114-car.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * This header provides Tegra114-specific constants for binding
+ * nvidia,tegra114-car.
+ */
+
+#ifndef _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H
+#define _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H
+
+#define TEGRA114_RESET(x) (5 * 32 + (x))
+#define TEGRA114_RST_DFLL_DVCO TEGRA114_RESET(0)
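Worked value: TEGRA114_RESET(0) expands to 5 * 32 + 0 = 160, i.e. the DFLL
DVCO reset lives in the sixth 32-bit reset bank.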
+
+#endif /* _DT_BINDINGS_RESET_NVIDIA_TEGRA114_CAR_H */
diff --git a/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h b/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h
new file mode 100644
index 000000000000..a89a0b44f08b
--- /dev/null
+++ b/include/dt-bindings/reset/sun55i-a523-mcu-ccu.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_
+#define _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_
+
+#define RST_BUS_MCU_I2S0 0
+#define RST_BUS_MCU_I2S1 1
+#define RST_BUS_MCU_I2S2 2
+#define RST_BUS_MCU_I2S3 3
+#define RST_BUS_MCU_SPDIF 4
+#define RST_BUS_MCU_DMIC 5
+#define RST_BUS_MCU_AUDIO_CODEC 6
+#define RST_BUS_MCU_DSP_MSGBOX 7
+#define RST_BUS_MCU_DSP_CFG 8
+#define RST_BUS_MCU_NPU 9
+#define RST_BUS_MCU_TIMER 10
+#define RST_BUS_MCU_DSP_DEBUG 11
+#define RST_BUS_MCU_DSP 12
+#define RST_BUS_MCU_DMA 13
+#define RST_BUS_MCU_PUBSRAM 14
+#define RST_BUS_MCU_RISCV_CFG 15
+#define RST_BUS_MCU_RISCV_DEBUG 16
+#define RST_BUS_MCU_RISCV_CORE 17
+#define RST_BUS_MCU_RISCV_MSGBOX 18
+#define RST_BUS_MCU_PWM0 19
+
+#endif /* _DT_BINDINGS_RST_SUN55I_A523_MCU_CCU_H_ */
diff --git a/include/dt-bindings/reset/thead,th1520-reset.h b/include/dt-bindings/reset/thead,th1520-reset.h
index 00459f160489..ee799286c175 100644
--- a/include/dt-bindings/reset/thead,th1520-reset.h
+++ b/include/dt-bindings/reset/thead,th1520-reset.h
@@ -12,5 +12,12 @@
#define TH1520_RESET_ID_NPU 2
#define TH1520_RESET_ID_WDT0 3
#define TH1520_RESET_ID_WDT1 4
+#define TH1520_RESET_ID_DPU_AHB 5
+#define TH1520_RESET_ID_DPU_AXI 6
+#define TH1520_RESET_ID_DPU_CORE 7
+#define TH1520_RESET_ID_DSI0_APB 8
+#define TH1520_RESET_ID_DSI1_APB 9
+#define TH1520_RESET_ID_HDMI 10
+#define TH1520_RESET_ID_HDMI_APB 11
#endif /* _DT_BINDINGS_TH1520_RESET_H */
diff --git a/include/dt-bindings/thermal/tegra114-soctherm.h b/include/dt-bindings/thermal/tegra114-soctherm.h
new file mode 100644
index 000000000000..b766a61cd1ce
--- /dev/null
+++ b/include/dt-bindings/thermal/tegra114-soctherm.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * This header provides constants for binding nvidia,tegra114-soctherm.
+ */
+
+#ifndef _DT_BINDINGS_THERMAL_TEGRA114_SOCTHERM_H
+#define _DT_BINDINGS_THERMAL_TEGRA114_SOCTHERM_H
+
+#define TEGRA114_SOCTHERM_SENSOR_CPU 0
+#define TEGRA114_SOCTHERM_SENSOR_MEM 1
+#define TEGRA114_SOCTHERM_SENSOR_GPU 2
+#define TEGRA114_SOCTHERM_SENSOR_PLLX 3
+
+#define TEGRA114_SOCTHERM_THROT_LEVEL_NONE 0
+#define TEGRA114_SOCTHERM_THROT_LEVEL_LOW 1
+#define TEGRA114_SOCTHERM_THROT_LEVEL_MED 2
+#define TEGRA114_SOCTHERM_THROT_LEVEL_HIGH 3
+
+#endif
diff --git a/include/kunit/test.h b/include/kunit/test.h
index d958ee53050e..5ec5182b5e57 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -92,6 +92,8 @@ struct kunit_attributes {
* @name: the name of the test case.
* @generate_params: the generator function for parameterized tests.
* @attr: the attributes associated with the test
+ * @param_init: The init function to run before a parameterized test.
+ * @param_exit: The exit function to run after a parameterized test.
*
* A test case is a function with the signature,
* ``void (*)(struct kunit *)``
@@ -126,8 +128,11 @@ struct kunit_attributes {
struct kunit_case {
void (*run_case)(struct kunit *test);
const char *name;
- const void* (*generate_params)(const void *prev, char *desc);
+ const void* (*generate_params)(struct kunit *test,
+ const void *prev, char *desc);
struct kunit_attributes attr;
+ int (*param_init)(struct kunit *test);
+ void (*param_exit)(struct kunit *test);
/* private: internal use only. */
enum kunit_status status;
@@ -219,6 +224,31 @@ static inline char *kunit_status_to_ok_not_ok(enum kunit_status status)
.attr = attributes, .module_name = KBUILD_MODNAME}
/**
+ * KUNIT_CASE_PARAM_WITH_INIT - Define a parameterized KUnit test case with custom
+ * param_init() and param_exit() functions.
+ * @test_name: The function implementing the test case.
+ * @gen_params: The function to generate parameters for the test case.
+ * @init: A reference to the param_init() function to run before a parameterized test.
+ * @exit: A reference to the param_exit() function to run after a parameterized test.
+ *
+ * Provides the option to register param_init() and param_exit() functions.
+ * param_init/exit will be passed the parameterized test context and run once
+ * before and once after the parameterized test. The init function can be used
+ * to add resources shared between parameter runs, to pass parameter arrays,
+ * and to run any other setup logic. The exit function can be used to clean up
+ * resources not managed by the parameterized test and to run any other
+ * teardown logic.
+ *
+ * Note: If you are registering a parameter array in param_init() with
+ * kunit_register_params_array() then you need to pass kunit_array_gen_params()
+ * to this macro as the generator function.
+ */
+#define KUNIT_CASE_PARAM_WITH_INIT(test_name, gen_params, init, exit) \
+ { .run_case = test_name, .name = #test_name, \
+ .generate_params = gen_params, \
+ .param_init = init, .param_exit = exit, \
+ .module_name = KBUILD_MODNAME}
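A hedged end-to-end sketch (the test, array and init names are illustrative):

	static int example_param_init(struct kunit *test)
	{
		kunit_register_params_array(test, example_params,
					    ARRAY_SIZE(example_params), NULL);
		return 0;
	}

	static struct kunit_case example_cases[] = {
		KUNIT_CASE_PARAM_WITH_INIT(example_test, kunit_array_gen_params,
					   example_param_init, NULL),
		{}
	};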
+
+/**
* struct kunit_suite - describes a related collection of &struct kunit_case
*
* @name: the name of the test. Purely informational.
@@ -263,19 +293,39 @@ struct kunit_suite_set {
struct kunit_suite * const *end;
};
+/* Stores the pointer to the parameter array and its metadata. */
+struct kunit_params {
+ /*
+ * Reference to the parameter array for a parameterized test. This
+	 * is NULL unless a parameter array was registered with the parameterized
+	 * test context (struct kunit) via kunit_register_params_array().
+ */
+ const void *params;
+ /* Reference to a function that gets the description of a parameter. */
+ void (*get_description)(struct kunit *test, const void *param, char *desc);
+ size_t num_params;
+ size_t elem_size;
+};
+
/**
* struct kunit - represents a running instance of a test.
*
* @priv: for user to store arbitrary data. Commonly used to pass data
* created in the init function (see &struct kunit_suite).
+ * @parent: reference to the parent context of type struct kunit that can
+ * be used for storing shared resources.
+ * @params_array: for storing the parameter array.
*
* Used to store information about the current context under which the test
* is running. Most of this data is private and should only be accessed
- * indirectly via public functions; the one exception is @priv which can be
- * used by the test writer to store arbitrary data.
+ * indirectly via public functions; the exceptions are @priv, @parent, and
+ * @params_array, which can be used by the test writer to store arbitrary data,
+ * to access the parent context, and to store the parameter array, respectively.
*/
struct kunit {
void *priv;
+ struct kunit *parent;
+ struct kunit_params params_array;
/* private: internal use only. */
const char *name; /* Read only after initialization! */
@@ -346,6 +396,8 @@ void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
struct kunit_suite_set suite_set);
+const void *kunit_array_gen_params(struct kunit *test, const void *prev, char *desc);
+
#if IS_BUILTIN(CONFIG_KUNIT)
int kunit_run_all_tests(void);
#else
@@ -1674,9 +1726,12 @@ do { \
* Define function @name_gen_params which uses @array to generate parameters.
*/
#define KUNIT_ARRAY_PARAM(name, array, get_desc) \
- static const void *name##_gen_params(const void *prev, char *desc) \
+ static const void *name##_gen_params(struct kunit *test, \
+ const void *prev, char *desc) \
{ \
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (!prev) \
+ kunit_register_params_array(test, array, ARRAY_SIZE(array), NULL); \
if (__next - (array) < ARRAY_SIZE((array))) { \
void (*__get_desc)(typeof(__next), char *) = get_desc; \
if (__get_desc) \
@@ -1695,9 +1750,12 @@ do { \
* Define function @name_gen_params which uses @array to generate parameters.
*/
#define KUNIT_ARRAY_PARAM_DESC(name, array, desc_member) \
- static const void *name##_gen_params(const void *prev, char *desc) \
+ static const void *name##_gen_params(struct kunit *test, \
+ const void *prev, char *desc) \
{ \
typeof((array)[0]) *__next = prev ? ((typeof(__next)) prev) + 1 : (array); \
+ if (!prev) \
+ kunit_register_params_array(test, array, ARRAY_SIZE(array), NULL); \
if (__next - (array) < ARRAY_SIZE((array))) { \
strscpy(desc, __next->desc_member, KUNIT_PARAM_DESC_SIZE); \
return __next; \
@@ -1705,6 +1763,33 @@ do { \
return NULL; \
}
+/**
+ * kunit_register_params_array() - Register parameter array for a KUnit test.
+ * @test: The KUnit test structure to which parameters will be added.
+ * @array: An array of test parameters.
+ * @param_count: Number of parameters.
+ * @get_desc: Function that generates a string description for a given parameter
+ * element.
+ *
+ * This macro initializes the @test's parameter array data, storing information
+ * including the parameter array, its count, the element size, and the parameter
+ * description function within `test->params_array`.
+ *
+ * Note: If using this macro in param_init(), kunit_array_gen_params()
+ * will then need to be manually provided as the parameter generator function to
+ * KUNIT_CASE_PARAM_WITH_INIT(). kunit_array_gen_params() is a KUnit
+ * function that uses the registered array to generate parameters
+ */
+#define kunit_register_params_array(test, array, param_count, get_desc) \
+ do { \
+ struct kunit *_test = (test); \
+ const typeof((array)[0]) * _params_ptr = &(array)[0]; \
+ _test->params_array.params = _params_ptr; \
+ _test->params_array.num_params = (param_count); \
+ _test->params_array.elem_size = sizeof(*_params_ptr); \
+ _test->params_array.get_description = (get_desc); \
+ } while (0)
+
// TODO(dlatypov@google.com): consider eventually migrating users to explicitly
// include resource.h themselves if they need it.
#include <kunit/resource.h>
diff --git a/include/linux/acpi_rimt.h b/include/linux/acpi_rimt.h
new file mode 100644
index 000000000000..fad3adc4d899
--- /dev/null
+++ b/include/linux/acpi_rimt.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ACPI_RIMT_H
+#define _ACPI_RIMT_H
+
+#ifdef CONFIG_ACPI_RIMT
+int rimt_iommu_register(struct device *dev);
+#else
+static inline int rimt_iommu_register(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_IOMMU_API) && defined(CONFIG_ACPI_RIMT)
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in);
+#else
+static inline int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _ACPI_RIMT_H */
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 9ef2633e2c08..d40ac39bfbe8 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -221,6 +221,16 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
ref->ct = NULL;
}
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag)
+{
+ tag->ct.flags |= CODETAG_FLAG_INACCURATE;
+}
+
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag)
+{
+ return !!(tag->ct.flags & CODETAG_FLAG_INACCURATE);
+}
+
#define alloc_tag_record(p) ((p) = current->alloc_tag)
#else /* CONFIG_MEM_ALLOC_PROFILING */
@@ -230,6 +240,8 @@ static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+static inline void alloc_tag_set_inaccurate(struct alloc_tag *tag) {}
+static inline bool alloc_tag_is_inaccurate(struct alloc_tag *tag) { return false; }
#define alloc_tag_record(p) do {} while (0)
#endif /* CONFIG_MEM_ALLOC_PROFILING */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e721148c95d0..3e64f14739dd 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -66,16 +66,6 @@ static inline void wb_stat_mod(struct bdi_writeback *wb,
percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
-static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- wb_stat_mod(wb, item, 1);
-}
-
-static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
-{
- wb_stat_mod(wb, item, -1);
-}
-
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
return percpu_counter_read_positive(&wb->stat[item]);
@@ -118,12 +108,10 @@ int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit
*
* BDI_CAP_WRITEBACK: Supports dirty page writeback, and dirty pages
* should contribute to accounting
- * BDI_CAP_WRITEBACK_ACCT: Automatically account writeback pages
* BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
*/
#define BDI_CAP_WRITEBACK (1 << 0)
-#define BDI_CAP_WRITEBACK_ACCT (1 << 1)
-#define BDI_CAP_STRICTLIMIT (1 << 2)
+#define BDI_CAP_STRICTLIMIT (1 << 1)
extern struct backing_dev_info noop_backing_dev_info;
diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h
index 0a25716820fe..851254f36eb3 100644
--- a/include/linux/bio-integrity.h
+++ b/include/linux/bio-integrity.h
@@ -13,6 +13,7 @@ enum bip_flags {
BIP_CHECK_GUARD = 1 << 5, /* guard check */
BIP_CHECK_REFTAG = 1 << 6, /* reftag check */
BIP_CHECK_APPTAG = 1 << 7, /* apptag check */
+ BIP_P2P_DMA = 1 << 8, /* using P2P address */
};
struct bio_integrity_payload {
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 46ffac5caab7..a64a30131031 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -322,8 +322,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs);
-int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
- unsigned *segs, unsigned max_bytes);
+int bio_split_io_at(struct bio *bio, const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes, unsigned len_align);
/**
* bio_next_split - get next @sectors from a bio, splitting if necessary
@@ -405,6 +405,11 @@ struct request_queue;
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
unsigned short max_vecs, blk_opf_t opf);
+
+static inline void bio_init_inline(struct bio *bio, struct block_device *bdev,
+				   unsigned short max_vecs, blk_opf_t opf)
+{
+	bio_init(bio, bdev, bio_inline_vecs(bio), max_vecs, opf);
+}
+
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
@@ -441,7 +446,14 @@ int submit_bio_wait(struct bio *bio);
int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
size_t len, enum req_op op);
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
+int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+ unsigned len_align_mask);
+
+static inline int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+ return bio_iov_iter_get_pages_aligned(bio, iter, 0);
+}
+
void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index e67a2b6e8f11..b659373788f6 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -4,6 +4,7 @@
#include <linux/blk-mq.h>
#include <linux/bio-integrity.h>
+#include <linux/blk-mq-dma.h>
struct request;
@@ -26,11 +27,25 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
#ifdef CONFIG_BLK_DEV_INTEGRITY
int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
+
+static inline bool blk_rq_integrity_dma_unmap(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ size_t mapped_len)
+{
+ return blk_dma_unmap(req, dma_dev, state, mapped_len,
+ bio_integrity(req->bio)->bip_flags & BIP_P2P_DMA);
+}
+
int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
ssize_t bytes);
int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
struct logical_block_metadata_cap __user *argp);
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter);
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter);
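A hedged driver-side mapping loop over these helpers (state and iter are
locals; the device-programming step is elided):

	if (blk_rq_integrity_dma_map_iter_start(req, dma_dev, &state, &iter)) {
		do {
			/* Program iter.addr / iter.len into the device. */
		} while (blk_rq_integrity_dma_map_iter_next(req, dma_dev,
							    &iter));
	}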
static inline bool
blk_integrity_queue_supports_integrity(struct request_queue *q)
@@ -109,12 +124,29 @@ static inline int blk_rq_map_integrity_sg(struct request *q,
{
return 0;
}
+static inline bool blk_rq_integrity_dma_unmap(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ size_t mapped_len)
+{
+ return false;
+}
static inline int blk_rq_integrity_map_user(struct request *rq,
void __user *ubuf,
ssize_t bytes)
{
return -EINVAL;
}
+static inline bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+ struct device *dma_dev, struct dma_iova_state *state,
+ struct blk_dma_iter *iter)
+{
+ return false;
+}
+static inline bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+ struct device *dma_dev, struct blk_dma_iter *iter)
+{
+ return false;
+}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
return NULL;
diff --git a/include/linux/blk-mq-dma.h b/include/linux/blk-mq-dma.h
index c26a01aeae00..51829958d872 100644
--- a/include/linux/blk-mq-dma.h
+++ b/include/linux/blk-mq-dma.h
@@ -5,6 +5,13 @@
#include <linux/blk-mq.h>
#include <linux/pci-p2pdma.h>
+struct blk_map_iter {
+ struct bvec_iter iter;
+ struct bio *bio;
+ struct bio_vec *bvecs;
+ bool is_integrity;
+};
+
struct blk_dma_iter {
/* Output address range for this iteration */
dma_addr_t addr;
@@ -14,7 +21,7 @@ struct blk_dma_iter {
blk_status_t status;
/* Internal to blk_rq_dma_map_iter_* */
- struct req_iterator iter;
+ struct blk_map_iter iter;
struct pci_p2pdma_map_state p2pdma;
};
@@ -36,19 +43,20 @@ static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state)
}
/**
- * blk_rq_dma_unmap - try to DMA unmap a request
+ * blk_dma_unmap - try to DMA unmap a request
* @req: request to unmap
* @dma_dev: device to unmap from
* @state: DMA IOVA state
* @mapped_len: number of bytes to unmap
+ * @is_p2p: true if mapped with PCI_P2PDMA_MAP_BUS_ADDR
*
 * Returns %false if the caller needs to manually unmap every DMA segment
 * mapped using the map iterator, or %true if no work is left to be done.
*/
-static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, size_t mapped_len)
+static inline bool blk_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len, bool is_p2p)
{
- if (req->cmd_flags & REQ_P2PDMA)
+ if (is_p2p)
return true;
if (dma_use_iova(state)) {
@@ -60,4 +68,11 @@ static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
return !dma_need_unmap(dma_dev);
}
+static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, size_t mapped_len)
+{
+ return blk_dma_unmap(req, dma_dev, state, mapped_len,
+ req->cmd_flags & REQ_P2PDMA);
+}
+
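And the matching completion-side sketch (the iod bookkeeping struct is
illustrative):

	unsigned int i;

	if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
		/* Fall back to unmapping each segment recorded at map time. */
		for (i = 0; i < iod->nr_segs; i++)
			dma_unmap_page(dma_dev, iod->seg[i].addr,
				       iod->seg[i].len, rq_dma_dir(req));
	}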
#endif /* BLK_MQ_DMA_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2a5a828f19a0..b25d12545f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -507,6 +507,8 @@ enum hctx_type {
* request_queue.tag_set_list.
* @srcu: Use as lock when type of the request queue is blocking
* (BLK_MQ_F_BLOCKING).
+ * @tags_srcu: SRCU used to defer freeing of tags page_list to prevent
+ * use-after-free when iterating tags.
* @update_nr_hwq_lock:
* Synchronize updating nr_hw_queues with add/del disk &
* switching elevator.
@@ -531,6 +533,7 @@ struct blk_mq_tag_set {
struct mutex tag_list_lock;
struct list_head tag_list;
struct srcu_struct *srcu;
+ struct srcu_struct tags_srcu;
struct rw_semaphore update_nr_hwq_lock;
};
@@ -767,6 +770,7 @@ struct blk_mq_tags {
* request pool
*/
spinlock_t lock;
+ struct rcu_head rcu_head;
};
static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags,
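/*
 * [Editor's sketch] Read side that the new @tags_srcu pairs with: tag
 * iteration enters an SRCU read section so that a concurrent free of the
 * tags page_list is deferred until the walk has finished. Illustrative
 * only; the real users live in the blk-mq tag iteration code.
 */
static void foo_walk_tags(struct blk_mq_tag_set *set)
{
	int srcu_idx = srcu_read_lock(&set->tags_srcu);

	/* ... iterate tags; rqs freed concurrently remain valid here ... */

	srcu_read_unlock(&set->tags_srcu, srcu_idx);
}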
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 09b99d52fd36..8e8d1cc8b06c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -198,10 +198,6 @@ static inline bool blk_path_error(blk_status_t error)
return true;
}
-struct bio_issue {
- u64 value;
-};
-
typedef __u32 __bitwise blk_opf_t;
typedef unsigned int blk_qc_t;
@@ -242,7 +238,8 @@ struct bio {
* on release of the bio.
*/
struct blkcg_gq *bi_blkg;
- struct bio_issue bi_issue;
+ /* Time that this bio was issued. */
+ u64 issue_time_ns;
#ifdef CONFIG_BLK_CGROUP_IOCOST
u64 bi_iocost_cost;
#endif
@@ -269,18 +266,16 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
-
- /*
- * We can inline a number of vecs at the end of the bio, to avoid
- * double allocations for a small number of bio_vecs. This member
- * MUST obviously be kept at the very end of the bio.
- */
- struct bio_vec bi_inline_vecs[];
};
#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT)
+static inline struct bio_vec *bio_inline_vecs(struct bio *bio)
+{
+ return (struct bio_vec *)(bio + 1);
+}
+
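/*
 * [Editor's sketch] With the flexible array gone from struct bio, an
 * allocator that wants @nr inline vecs sizes the object explicitly and
 * locates them via bio_inline_vecs(); the real allocation paths are in
 * block/bio.c, foo_alloc_bio() is illustrative.
 */
static struct bio *foo_alloc_bio(unsigned short nr, gfp_t gfp)
{
	struct bio *bio = kmalloc(sizeof(*bio) + nr * sizeof(struct bio_vec),
				  gfp);

	if (bio)
		bio_init(bio, NULL, bio_inline_vecs(bio), nr, 0);
	return bio;
}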
/*
* bio flags
*/
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fe1797bbec42..dad5cb5b3812 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -199,7 +199,7 @@ struct gendisk {
unsigned int zone_wplugs_hash_bits;
atomic_t nr_zone_wplugs;
spinlock_t zone_wplugs_lock;
- struct mempool_s *zone_wplugs_pool;
+ struct mempool *zone_wplugs_pool;
struct hlist_head *zone_wplugs_hash;
struct workqueue_struct *zone_wplugs_wq;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -657,6 +657,7 @@ enum {
QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
+ QUEUE_FLAG_BIO_ISSUE_TIME, /* record bio->issue_time_ns */
QUEUE_FLAG_MAX
};
@@ -999,6 +1000,8 @@ extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
@@ -1590,13 +1593,6 @@ static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
return queue_dma_alignment(bdev_get_queue(bdev));
}
-static inline bool bdev_iter_is_aligned(struct block_device *bdev,
- struct iov_iter *iter)
-{
- return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
- bdev_logical_block_size(bdev) - 1);
-}
-
static inline unsigned int
blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
@@ -1660,7 +1656,7 @@ struct block_device_operations {
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
void (*unlock_native_capacity) (struct gendisk *);
- int (*getgeo)(struct block_device *, struct hd_geometry *);
+ int (*getgeo)(struct gendisk *, struct hd_geometry *);
int (*set_read_only)(struct block_device *bdev, bool ro);
void (*free_disk)(struct gendisk *disk);
/* this callback is with swap_lock and sometimes page table lock held */
@@ -1870,6 +1866,20 @@ bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}
+static inline int bio_split_rw_at(struct bio *bio,
+ const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes)
+{
+ return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
+}
+
+static inline int bio_iov_iter_get_bdev_pages(struct bio *bio,
+ struct iov_iter *iter, struct block_device *bdev)
+{
+ return bio_iov_iter_get_pages_aligned(bio, iter,
+ bdev_logical_block_size(bdev) - 1);
+}
+
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
#endif /* _LINUX_BLKDEV_H */
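/*
 * [Editor's sketch] The helper above replaces the removed
 * bdev_iter_is_aligned() pattern: alignment against the logical block
 * mask is enforced while the pages are pinned rather than checked up
 * front. Abbreviated error handling; foo_dio_fill() is illustrative.
 */
static int foo_dio_fill(struct bio *bio, struct iov_iter *iter,
			struct block_device *bdev)
{
	int ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);

	if (ret)
		return ret;	/* e.g. -EINVAL for a misaligned buffer */
	submit_bio(bio);
	return 0;
}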
diff --git a/include/linux/bnxt/hsi.h b/include/linux/bnxt/hsi.h
index 549231703bce..47c34990cf23 100644
--- a/include/linux/bnxt/hsi.h
+++ b/include/linux/bnxt/hsi.h
@@ -276,6 +276,10 @@ struct cmd_nums {
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
#define HWRM_REG_POWER_HISTOGRAM 0xe3UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_START 0xe4UL
+ #define HWRM_MONITOR_PAX_HISTOGRAM_COLLECT 0xe5UL
+ #define HWRM_STAT_QUERY_ROCE_STATS 0xe6UL
+ #define HWRM_STAT_QUERY_ROCE_STATS_EXT 0xe7UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
@@ -407,9 +411,8 @@ struct cmd_nums {
#define HWRM_FUNC_LAG_UPDATE 0x1b1UL
#define HWRM_FUNC_LAG_FREE 0x1b2UL
#define HWRM_FUNC_LAG_QCFG 0x1b3UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
- #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
+ #define HWRM_FUNC_TTX_PACING_RATE_PROF_QUERY 0x1c3UL
+ #define HWRM_FUNC_TTX_PACING_RATE_QUERY 0x1c4UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -441,6 +444,7 @@ struct cmd_nums {
#define HWRM_MFG_WRITE_CERT_NVM 0x21cUL
#define HWRM_PORT_POE_CFG 0x230UL
#define HWRM_PORT_POE_QCFG 0x231UL
+ #define HWRM_PORT_PHY_FDRSTAT 0x232UL
#define HWRM_UDCC_QCAPS 0x258UL
#define HWRM_UDCC_CFG 0x259UL
#define HWRM_UDCC_QCFG 0x25aUL
@@ -453,6 +457,8 @@ struct cmd_nums {
#define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
#define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
#define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_QCFG 0x264UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_QCFG 0x265UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -551,6 +557,8 @@ struct cmd_nums {
#define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
#define HWRM_DBG_PTRACE 0xff2dUL
#define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
+ #define HWRM_DBG_TOKEN_QUERY_AUTH_IDS 0xff2fUL
+ #define HWRM_DBG_TOKEN_CFG 0xff30UL
#define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
#define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
@@ -632,8 +640,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 97
-#define HWRM_VERSION_STR "1.10.3.97"
+#define HWRM_VERSION_RSVD 133
+#define HWRM_VERSION_STR "1.10.3.133"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -688,6 +696,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_DEBUG_TOKEN_SUPPORTED 0x20000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -872,7 +881,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ADPTV_QOS 0x51UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x52UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1344,7 +1354,8 @@ struct hwrm_async_event_cmpl_dbg_buf_producer {
#define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
#define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
#define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
- #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE 0xcUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ERR_QPC_TRACE
};
/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
@@ -1401,7 +1412,11 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUP_UDCC_SES 0x7UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DB_DROP 0x8UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MD_TEMP 0x9UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR 0xaUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_VNIC_ERR
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1914,6 +1929,12 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL
#define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL
#define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_ROCE_VF_DYN_ALLOC_SUPPORT 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_CHANGE_UDP_SRCPORT_SUPPORT 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_COMPLIANCE_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MULTI_L2_DB_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_PCIE_SECURE_ATS_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MBUF_STATS_SUPPORTED 0x800UL
__le16 max_roce_vfs;
__le16 max_crypto_rx_flow_filters;
u8 unused_3[3];
@@ -1931,7 +1952,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:1344b/168B) */
+/* hwrm_func_qcfg_output (size:1408b/176B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2124,7 +2145,43 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
#define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
__le16 mirror_vnic_id;
- u8 unused_7[7];
+ u8 max_link_width;
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_MAX_LINK_WIDTH_LAST FUNC_QCFG_RESP_MAX_LINK_WIDTH_X16
+ u8 max_link_speed;
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_MAX_LINK_SPEED_LAST FUNC_QCFG_RESP_MAX_LINK_SPEED_G5
+ u8 negotiated_link_width;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X8 0x8UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16 0x10UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_WIDTH_X16
+ u8 negotiated_link_speed;
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_UNKNOWN 0x0UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G1 0x1UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G2 0x2UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G3 0x3UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G4 0x4UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5 0x5UL
+ #define FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_LAST FUNC_QCFG_RESP_NEGOTIATED_LINK_SPEED_G5
+ u8 unused_7[2];
+ u8 pcie_compliance;
+ u8 unused_8;
+ __le16 l2_db_multi_page_size_kb;
+ u8 unused_9[5];
u8 valid;
};
@@ -2322,6 +2379,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
#define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
#define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
+ #define FUNC_CFG_REQ_ENABLES2_PCIE_COMPLIANCE 0x1000UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
@@ -2353,7 +2411,8 @@ struct hwrm_func_cfg_input {
__le16 xid_partition_cfg;
#define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
#define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
- __le16 unused_2;
+ u8 pcie_compliance;
+ u8 unused_2;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2370,11 +2429,41 @@ struct hwrm_func_cfg_output {
struct hwrm_func_cfg_cmd_err {
u8 code;
#define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
- #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
- #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_OUT_OF_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_NPAR_PARTITION_DOWN_FAILED 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_TPID_SET_DFLT_VLAN_NOT_SET 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_RES_ARRAY_ALLOC_FAILED 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_ASSET_TEST_FAILED 0x5UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_RING_RES_UPDATE_FAILED 0x6UL
+ #define FUNC_CFG_CMD_ERR_CODE_APPLY_MAX_BW_FAILED 0x7UL
+ #define FUNC_CFG_CMD_ERR_CODE_ENABLE_EVB_FAILED 0x8UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_ASSET_TEST_FAILED 0x9UL
+ #define FUNC_CFG_CMD_ERR_CODE_RSS_CTXT_RES_UPDATE_FAILED 0xaUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_ASSET_TEST_FAILED 0xbUL
+ #define FUNC_CFG_CMD_ERR_CODE_CMPL_RING_RES_UPDATE_FAILED 0xcUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_ASSET_TEST_FAILED 0xdUL
+ #define FUNC_CFG_CMD_ERR_CODE_NQ_RES_UPDATE_FAILED 0xeUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_ASSET_TEST_FAILED 0xfUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_RING_RES_UPDATE_FAILED 0x10UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_ASSET_TEST_FAILED 0x11UL
+ #define FUNC_CFG_CMD_ERR_CODE_VNIC_RES_UPDATE_FAILED 0x12UL
+ #define FUNC_CFG_CMD_ERR_CODE_FAILED_TO_START_STATS_THREAD 0x13UL
+ #define FUNC_CFG_CMD_ERR_CODE_RDMA_SRIOV_DISABLED 0x14UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_DISABLED 0x15UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_ASSET_TEST_FAILED 0x16UL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_KTLS_RES_UPDATE_FAILED 0x17UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_DISABLED 0x18UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_ASSET_TEST_FAILED 0x19UL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_KTLS_RES_UPDATE_FAILED 0x1aUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_DISABLED 0x1bUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_ASSET_TEST_FAILED 0x1cUL
+ #define FUNC_CFG_CMD_ERR_CODE_TX_QUIC_RES_UPDATE_FAILED 0x1dUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_DISABLED 0x1eUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_ASSET_TEST_FAILED 0x1fUL
+ #define FUNC_CFG_CMD_ERR_CODE_RX_QUIC_RES_UPDATE_FAILED 0x20UL
+ #define FUNC_CFG_CMD_ERR_CODE_INVALID_KDNET_MODE 0x21UL
+ #define FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL 0x22UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_SCHQ_CFG_FAIL
u8 unused_0[7];
};
@@ -3780,6 +3869,7 @@ struct hwrm_func_backing_store_cfg_v2_input {
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3865,6 +3955,7 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3904,6 +3995,7 @@ struct hwrm_func_backing_store_qcfg_v2_output {
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ERR_QPC_TRACE 0x2aUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
@@ -4027,6 +4119,7 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ERR_QPC_TRACE 0x2bUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
@@ -4070,6 +4163,7 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ERR_QPC_TRACE 0x2bUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
@@ -4216,6 +4310,10 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_ENABLE 0x800000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_LINK_TRAINING_DISABLE 0x1000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_ENABLE 0x2000000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_PRECODING_DISABLE 0x4000000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -4703,6 +4801,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_LINK_TRAINING 0x8UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_PRECODING 0x10UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -4725,6 +4825,10 @@ struct hwrm_port_phy_qcfg_output {
u8 link_down_reason;
#define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_CABLE_REMOVED 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_MODULE_FAULT 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_BMC_REQUEST 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_TX_LASER_DISABLED 0x20UL
__le16 support_speeds2;
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
@@ -5882,9 +5986,10 @@ struct hwrm_port_led_qcaps_output {
#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
#define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
__le16 led0_color_caps;
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
u8 led1_id;
u8 led1_type;
#define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
@@ -5900,9 +6005,10 @@ struct hwrm_port_led_qcaps_output {
#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
#define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
__le16 led1_color_caps;
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
u8 led2_id;
u8 led2_type;
#define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
@@ -5918,9 +6024,10 @@ struct hwrm_port_led_qcaps_output {
#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
#define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
__le16 led2_color_caps;
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
u8 led3_id;
u8 led3_type;
#define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
@@ -5936,9 +6043,10 @@ struct hwrm_port_led_qcaps_output {
#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
#define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
__le16 led3_color_caps;
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GRNAMB_SUPPORTED 0x8UL
u8 unused_4[3];
u8 valid;
};
@@ -6643,6 +6751,67 @@ struct hwrm_queue_dscp2pri_cfg_output {
u8 valid;
};
+/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 max_pfcwd_timeout;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcwd_timeout_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[6];
+};
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pfcwd_timeout_value;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* hwrm_vnic_alloc_input (size:192b/24B) */
struct hwrm_vnic_alloc_input {
__le16 req_type;
@@ -7036,9 +7205,22 @@ struct hwrm_vnic_rss_cfg_output {
/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */
struct hwrm_vnic_rss_cfg_cmd_err {
u8 code;
- #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
- #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_UNABLE_TO_GET_RSS_CFG 0x2UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_UNSUPPORTED 0x3UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_TYPE_ERR 0x4UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_MODE_FAIL 0x5UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RING_GRP_TABLE_ALLOC_ERR 0x6UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HASH_KEY_ALLOC_ERR 0x7UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_DMA_FAILED 0x8UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_RX_RING_ALLOC_ERR 0x9UL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CMPL_RING_ALLOC_ERR 0xaUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_HW_SET_RSS_FAILED 0xbUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_CTX_INVALID 0xcUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_INVALID 0xdUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID 0xeUL
+ #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_VNIC_RING_TABLE_PAIR_INVALID
u8 unused_0[7];
};
@@ -7177,7 +7359,7 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
u8 valid;
};
-/* hwrm_ring_alloc_input (size:704b/88B) */
+/* hwrm_ring_alloc_input (size:768b/96B) */
struct hwrm_ring_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -7195,6 +7377,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
#define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
#define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL
+ #define RING_ALLOC_REQ_ENABLES_DPI_VALID 0x2000UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -7287,6 +7470,8 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE
u8 unused_4;
__le64 cq_handle;
+ __le16 dpi;
+ __le16 unused_5[3];
};
/* hwrm_ring_alloc_output (size:128b/16B) */
@@ -7776,7 +7961,10 @@ struct hwrm_cfa_l2_set_rx_mask_cmd_err {
u8 code;
#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
#define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_MAX_VLAN_TAGS 0x2UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_VNIC_ID 0x3UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION 0x4UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_INVALID_ACTION
u8 unused_0[7];
};
@@ -8109,9 +8297,38 @@ struct hwrm_cfa_ntuple_filter_alloc_output {
/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
u8 code;
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_MAC 0x65UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_BC_MC_MAC 0x66UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_VNIC 0x67UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_PF_FID 0x68UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L2_CTXT_ID 0x69UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_CTXT_CFG 0x6aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_NULL_L2_DATA_FLD 0x6bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_CFA_LAYOUT 0x6cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_CTXT_ALLOC_FAIL 0x6dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ROCE_FLOW_ERR 0x6eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_OWNER_FID 0x6fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ZERO_REF_CNT 0x70UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_FLOW_TYPE 0x71UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_IVLAN 0x72UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_MAX_VLAN_ID 0x73UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_TNL_REQ 0x74UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_ADDR 0x75UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L2_IVLAN 0x76UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR 0x77UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_L3_ADDR_TYPE 0x78UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_T_L3_ADDR_TYPE 0x79UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DST_VNIC_ID 0x7aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VNI 0x7bUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_DST_ID 0x7cUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_FAIL_ROCE_L2_FLOW 0x7dUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_NPAR_VLAN 0x7eUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_ATSP_ADD 0x7fUL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_DFLT_VLAN_FAIL 0x80UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_INVALID_L3_TYPE 0x81UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW 0x82UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_VAL_FAIL_TNL_FLOW
u8 unused_0[7];
};
@@ -9181,7 +9398,7 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
-/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */
+/* pcie_ctx_hw_stats_v2 (size:4544b/568B) */
struct pcie_ctx_hw_stats_v2 {
__le64 pcie_pl_signal_integrity;
__le64 pcie_dl_signal_integrity;
@@ -9212,6 +9429,9 @@ struct pcie_ctx_hw_stats_v2 {
__le64 pcie_other_packet_count;
__le64 pcie_blocked_packet_count;
__le64 pcie_cmpl_packet_count;
+ __le32 pcie_rd_latency_histogram[12];
+ __le32 pcie_rd_latency_all_normal_count;
+ __le32 unused_2;
};
/* hwrm_stat_generic_qstats_input (size:256b/32B) */
@@ -9406,7 +9626,8 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
#define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
#define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
+ #define STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS 0x190UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_DBG_TOKEN_CLAIMS
__le16 len;
u8 version;
#define STRUCT_HDR_VERSION_0 0x0UL
@@ -9459,11 +9680,13 @@ struct hwrm_fw_set_structured_data_output {
/* hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */
struct hwrm_fw_set_structured_data_cmd_err {
u8 code;
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
- #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_ALREADY_ADDED 0x4UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG 0x5UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_INST_IN_PROG
u8 unused_0[7];
};
@@ -9487,7 +9710,9 @@ struct hwrm_fw_get_structured_data_input {
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
- #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_SUPPORTED 0x320UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE 0x321UL
+ #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_CLAIMS_ACTIVE
u8 count;
u8 unused_0;
};
@@ -10172,7 +10397,8 @@ struct hwrm_dbg_log_buffer_flush_input {
#define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
#define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
#define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
- #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE 0xcUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE
u8 unused_1[2];
__le32 flags;
#define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
@@ -10295,10 +10521,15 @@ struct hwrm_nvm_write_output {
/* hwrm_nvm_write_cmd_err (size:64b/8B) */
struct hwrm_nvm_write_cmd_err {
u8 code;
- #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_WRITE_CMD_ERR_CODE_WRITE_FAILED 0x3UL
+ #define NVM_WRITE_CMD_ERR_CODE_REQD_ERASE_FAILED 0x4UL
+ #define NVM_WRITE_CMD_ERR_CODE_VERIFY_FAILED 0x5UL
+ #define NVM_WRITE_CMD_ERR_CODE_INVALID_HEADER 0x6UL
+ #define NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED 0x7UL
+ #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_UPDATE_DIGEST_FAILED
u8 unused_0[7];
};
@@ -10438,7 +10669,11 @@ struct hwrm_nvm_get_dev_info_output {
__le16 srt2_fw_minor;
__le16 srt2_fw_build;
__le16 srt2_fw_patch;
- u8 unused_0[7];
+ u8 security_soc_fw_major;
+ u8 security_soc_fw_minor;
+ u8 security_soc_fw_build;
+ u8 security_soc_fw_patch;
+ u8 unused_0[3];
u8 valid;
};
@@ -10568,7 +10803,9 @@ struct hwrm_nvm_install_update_cmd_err {
#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
#define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_DEFRAG_FAILED 0x5UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR 0x6UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN_DIR_ERR
u8 unused_0[7];
};
@@ -10591,7 +10828,8 @@ struct hwrm_nvm_get_variable_input {
__le16 index_2;
__le16 index_3;
u8 flags;
- #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL
+ #define NVM_GET_VARIABLE_REQ_FLAGS_VALIDATE_OPT_VALUE 0x2UL
u8 unused_0;
};
@@ -10606,18 +10844,25 @@ struct hwrm_nvm_get_variable_output {
#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL
#define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL
#define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF
- u8 unused_0[3];
+ u8 flags;
+ #define NVM_GET_VARIABLE_RESP_FLAGS_VALIDATE_OPT_VALUE 0x1UL
+ u8 unused_0[2];
u8 valid;
};
/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_get_variable_cmd_err {
u8 code;
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
- #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x4UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x5UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x6UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x7UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x8UL
+ #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_NO_MEM
u8 unused_0[7];
};
@@ -10667,10 +10912,17 @@ struct hwrm_nvm_set_variable_output {
/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
struct hwrm_nvm_set_variable_cmd_err {
u8 code;
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
- #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACTION_NOT_SUPPORTED 0x4UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INDEX_INVALID 0x5UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_ACCESS_DENIED 0x6UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_CB_FAILED 0x7UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_INVALID_DATA_LEN 0x8UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM 0x9UL
+ #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_NO_MEM
u8 unused_0[7];
};
diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h
index 1af241525a17..f6e0795db484 100644
--- a/include/linux/bpfptr.h
+++ b/include/linux/bpfptr.h
@@ -67,7 +67,7 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
{
- void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN);
+ void *p = kvmalloc_node_align_noprof(len, 1, GFP_USER | __GFP_NOWARN, NUMA_NO_NODE);
if (!p)
return ERR_PTR(-ENOMEM);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 0a80e1f9aa20..3fc0efa0825b 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -22,11 +22,8 @@ struct page;
* @bv_len: Number of bytes in the address range.
* @bv_offset: Start of the address range relative to the start of @bv_page.
*
- * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len:
- *
- * nth_page(@bv_page, n) == @bv_page + n
- *
- * This holds because page_is_mergeable() checks the above property.
+ * All pages within a bio_vec starting from @bv_page are contiguous and
+ * can simply be iterated (see bvec_advance()).
*/
struct bio_vec {
struct page *bv_page;
diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h
index 5dfdbb63b1d5..d30816dd93c7 100644
--- a/include/linux/can/bittiming.h
+++ b/include/linux/can/bittiming.h
@@ -16,6 +16,10 @@
#define CAN_CTRLMODE_FD_TDC_MASK \
(CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL)
+#define CAN_CTRLMODE_TDC_AUTO_MASK \
+ (CAN_CTRLMODE_TDC_AUTO)
+#define CAN_CTRLMODE_TDC_MANUAL_MASK \
+ (CAN_CTRLMODE_TDC_MANUAL)
/*
* struct can_tdc - CAN FD Transmission Delay Compensation parameters
@@ -114,13 +118,24 @@ struct can_tdc_const {
u32 tdcf_max;
};
+struct data_bittiming_params {
+ const struct can_bittiming_const *data_bittiming_const;
+ struct can_bittiming data_bittiming;
+ const struct can_tdc_const *tdc_const;
+ struct can_tdc tdc;
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
+ int (*do_set_data_bittiming)(struct net_device *dev);
+ int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
+};
+
#ifdef CONFIG_CAN_CALC_BITTIMING
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc, struct netlink_ext_ack *extack);
void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported);
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported);
#else /* !CONFIG_CAN_CALC_BITTIMING */
static inline int
can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
@@ -133,7 +148,7 @@ can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
static inline void
can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
+ u32 tdc_mask, u32 *ctrlmode, u32 ctrlmode_supported)
{
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
@@ -150,6 +165,35 @@ int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
struct netlink_ext_ack *extack);
/*
+ * can_get_relative_tdco() - TDCO relative to the sample point
+ *
+ * struct can_tdc::tdco represents the absolute offset from TDCV. Some
+ * controllers use instead an offset relative to the Sample Point (SP)
+ * such that:
+ *
+ * SSP = TDCV + absolute TDCO
+ *     = TDCV + SP + relative TDCO
+ *
+ * -+----------- one bit ----------+-- TX pin
+ *  |<--- Sample Point --->|
+ *
+ * --+----------- one bit ----------+-- RX pin
+ *  |<-------- TDCV -------->|
+ *                           |<------------------------>| absolute TDCO
+ *  |<--- Sample Point --->|
+ *  |                       |<->| relative TDCO
+ *  |<------------- Secondary Sample Point ------------>|
+ */
+static inline s32 can_get_relative_tdco(const struct data_bittiming_params *dbt_params)
+{
+ const struct can_bittiming *dbt = &dbt_params->data_bittiming;
+ s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ return (s32)dbt_params->tdc.tdco - sample_point_in_tc;
+}
+
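/*
 * [Editor's worked example, illustrative numbers] With brp = 2,
 * prop_seg = 1 and phase_seg1 = 5, the sample point sits at
 * (CAN_SYNC_SEG + 1 + 5) * 2 = 14 time quanta; an absolute tdco of 17
 * then yields a relative TDCO of 17 - 14 = 3, i.e. the secondary sample
 * point trails the sample point by three time quanta.
 */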
+/*
* can_bit_time() - Duration of one bit
*
* Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 9a92cbe5b2cb..a2229a61ccde 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,17 +38,6 @@ enum can_termination_gpio {
CAN_TERMINATION_GPIO_MAX,
};
-struct data_bittiming_params {
- const struct can_bittiming_const *data_bittiming_const;
- struct can_bittiming data_bittiming;
- const struct can_tdc_const *tdc_const;
- struct can_tdc tdc;
- const u32 *data_bitrate_const;
- unsigned int data_bitrate_const_cnt;
- int (*do_set_data_bittiming)(struct net_device *dev);
- int (*do_get_auto_tdcv)(const struct net_device *dev, u32 *tdcv);
-};
-
/*
* CAN common private data
*/
@@ -96,56 +85,6 @@ static inline bool can_fd_tdc_is_enabled(const struct can_priv *priv)
return !!(priv->ctrlmode & CAN_CTRLMODE_FD_TDC_MASK);
}
-/*
- * can_get_relative_tdco() - TDCO relative to the sample point
- *
- * struct can_tdc::tdco represents the absolute offset from TDCV. Some
- * controllers use instead an offset relative to the Sample Point (SP)
- * such that:
- *
- * SSP = TDCV + absolute TDCO
- *     = TDCV + SP + relative TDCO
- *
- * -+----------- one bit ----------+-- TX pin
- *  |<--- Sample Point --->|
- *
- * --+----------- one bit ----------+-- RX pin
- *  |<-------- TDCV -------->|
- *                           |<------------------------>| absolute TDCO
- *  |<--- Sample Point --->|
- *  |                       |<->| relative TDCO
- *  |<------------- Secondary Sample Point ------------>|
- */
-static inline s32 can_get_relative_tdco(const struct can_priv *priv)
-{
- const struct can_bittiming *dbt = &priv->fd.data_bittiming;
- s32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- return (s32)priv->fd.tdc.tdco - sample_point_in_tc;
-}
-
-/* helper to define static CAN controller features at device creation time */
-static inline int __must_check can_set_static_ctrlmode(struct net_device *dev,
- u32 static_mode)
-{
- struct can_priv *priv = netdev_priv(dev);
-
- /* alloc_candev() succeeded => netdev_priv() is valid at this point */
- if (priv->ctrlmode_supported & static_mode) {
- netdev_warn(dev,
- "Controller features can not be supported and static at the same time\n");
- return -EINVAL;
- }
- priv->ctrlmode = static_mode;
-
- /* override MTU which was set by default in can_setup()? */
- if (static_mode & CAN_CTRLMODE_FD)
- dev->mtu = CANFD_MTU;
-
- return 0;
-}
-
static inline u32 can_get_static_ctrlmode(struct can_priv *priv)
{
return priv->ctrlmode & ~priv->ctrlmode_supported;
@@ -187,7 +126,10 @@ struct can_priv *safe_candev_priv(struct net_device *dev);
int open_candev(struct net_device *dev);
void close_candev(struct net_device *dev);
+void can_set_default_mtu(struct net_device *dev);
int can_change_mtu(struct net_device *dev, int new_mtu);
+int __must_check can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode);
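/*
 * [Editor's sketch] Typical call site for the now out-of-line helper:
 * drivers with fixed features set the static mode right after
 * alloc_candev() and before register_candev(). The mode chosen here is
 * illustrative; the call fails if the mode is also in ctrlmode_supported.
 */
static int foo_setup_static_mode(struct net_device *dev)
{
	return can_set_static_ctrlmode(dev, CAN_CTRLMODE_3_SAMPLES);
}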
int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd);
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
struct kernel_ethtool_ts_info *info);
@@ -199,6 +141,8 @@ int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
const char *can_get_state_str(const enum can_state state);
+const char *can_get_ctrlmode_str(u32 ctrlmode);
+
void can_state_get_by_berr_counter(const struct net_device *dev,
const struct can_berr_counter *bec,
enum can_state *tx_state,
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
index f38772fd0c07..d3788a3d0942 100644
--- a/include/linux/can/dev/peak_canfd.h
+++ b/include/linux/can/dev/peak_canfd.h
@@ -2,8 +2,8 @@
/*
* CAN driver for PEAK System micro-CAN based adapters
*
- * Copyright (C) 2003-2011 PEAK System-Technik GmbH
- * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ * Copyright (C) 2003-2025 PEAK System-Technik GmbH
+ * Author: Stéphane Grosjean <stephane.grosjean@hms-networks.com>
*/
#ifndef PUCAN_H
#define PUCAN_H
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index 457ed8fd3214..8ea2a5f7c98a 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -16,13 +16,16 @@ struct module;
#define CODETAG_SECTION_START_PREFIX "__start_"
#define CODETAG_SECTION_STOP_PREFIX "__stop_"
+/* codetag flags */
+#define CODETAG_FLAG_INACCURATE (1 << 0)
+
/*
* An instance of this structure is created in a special ELF section at every
* code location being tagged. At runtime, the special section is treated as
* an array of these.
*/
struct codetag {
- unsigned int flags; /* used in later patches */
+ unsigned int flags;
unsigned int lineno;
const char *modname;
const char *function;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 95f3807c8c55..40966512ea18 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -780,11 +780,10 @@ struct cpufreq_frequency_table {
else
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table);
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy);
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy);
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 9e62b2a85538..cae8c613c5fc 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -110,7 +110,7 @@ struct damon_target {
*
* @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
* @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
- * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT.
+ * @DAMOS_PAGEOUT: Reclaim the region.
* @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
* @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
* @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
@@ -121,10 +121,10 @@ struct damon_target {
* @NR_DAMOS_ACTIONS: Total number of DAMOS actions
*
* The support of each action is up to running &struct damon_operations.
- * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR supports all actions except
- * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO. &enum DAMON_OPS_PADDR
- * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum
- * DAMOS_LRU_DEPRIO, and &DAMOS_STAT.
+ * Refer to 'Operation Action' section of Documentation/mm/damon/design.rst for
+ * status of the supports.
+ *
+ * Note that DAMOS_PAGEOUT doesn't trigger demotions.
*/
enum damos_action {
DAMOS_WILLNEED,
@@ -748,7 +748,8 @@ struct damon_attrs {
* Accesses to other fields must be protected by themselves.
*
* @ops: Set of monitoring operations for given use cases.
- *
+ * @addr_unit: Scale factor for core to ops address conversion.
+ * @min_sz_region: Minimum region size.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
@@ -790,6 +791,8 @@ struct damon_ctx {
struct mutex kdamond_lock;
struct damon_operations ops;
+ unsigned long addr_unit;
+ unsigned long min_sz_region;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -878,7 +881,7 @@ static inline void damon_insert_region(struct damon_region *r,
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges);
+ unsigned int nr_ranges, unsigned long min_sz_region);
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
struct damon_attrs *attrs);
@@ -935,6 +938,7 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
}
+bool damon_initialized(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
bool damon_is_running(struct damon_ctx *ctx);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index cc3e1c1a3454..c83e02b94389 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -95,7 +95,10 @@ struct dentry {
seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
- struct qstr d_name;
+ union {
+ struct qstr __d_name; /* for use ONLY in fs/dcache.c */
+ const struct qstr d_name;
+ };
struct inode *d_inode; /* Where the name belongs to - NULL is
* negative */
union shortname_store d_shortname;
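/*
 * [Editor's note] Readers keep using the const d_name view, so stray
 * writes outside fs/dcache.c now fail to compile; only fs/dcache.c may
 * rename through the __d_name alias under the appropriate locks.
 * Illustrative reader:
 */
static void foo_show_dentry(const struct dentry *dentry)
{
	pr_info("name: %s\n", dentry->d_name.name);
}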
diff --git a/include/linux/device/devres.h b/include/linux/device/devres.h
index ae696d10faff..8c5f57e0d613 100644
--- a/include/linux/device/devres.h
+++ b/include/linux/device/devres.h
@@ -80,6 +80,8 @@ void devm_kfree(struct device *dev, const void *p);
void * __realloc_size(3)
devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp);
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp);
static inline void *devm_kmemdup_array(struct device *dev, const void *src,
size_t n, size_t size, gfp_t flags)
{
diff --git a/include/linux/dibs.h b/include/linux/dibs.h
new file mode 100644
index 000000000000..c75607f8a5cf
--- /dev/null
+++ b/include/linux/dibs.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Direct Internal Buffer Sharing
+ *
+ * Definitions for the DIBS module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#ifndef _DIBS_H
+#define _DIBS_H
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+
+/* DIBS - Direct Internal Buffer Sharing - concept
+ * -----------------------------------------------
+ * In the case of multiple system sharing the same hardware, dibs fabrics can
+ * provide dibs devices to these systems. The systems use dibs devices of the
+ * same fabric to communicate via dmbs (Direct Memory Buffers). Each dmb has
+ * exactly one owning local dibs device and one remote using dibs device, that
+ * is authorized to write into this dmb. This access control is provided by the
+ * dibs fabric.
+ *
+ * Because the access to the dmb is based on access to physical memory, it is
+ * lossless and synchronous. The remote devices can directly access any offset
+ * of the dmb.
+ *
+ * Dibs fabrics, dibs devices and dmbs are identified by tokens and ids.
+ * Dibs fabric id is unique within the same hardware (with the exception of the
+ * dibs loopback fabric), dmb token is unique within the same fabric, dibs
+ * device gids are guaranteed to be unique within the same fabric and
+ * statistically likely to be globally unique. The exchange of these tokens and
+ * ids between the systems is not part of the dibs concept.
+ *
+ * The dibs layer provides an abstraction between dibs device drivers and dibs
+ * clients.
+ */
+
+/* DMB - Direct Memory Buffer
+ * --------------------------
+ * A dibs client provides a dmb as input buffer for a local receiving
+ * dibs device for exactly one (remote) sending dibs device. Only this
+ * sending device can send data into this dmb using move_data(). Sender
+ * and receiver can be the same device. A dmb belongs to exactly one client.
+ */
+struct dibs_dmb {
+ /* tok - Token for this dmb
+ * Used by remote and local devices and clients to address this dmb.
+ * Provided by dibs fabric. Unique per dibs fabric.
+ */
+ u64 dmb_tok;
+ /* rgid - GID of designated remote sending device */
+ uuid_t rgid;
+ /* cpu_addr - buffer address */
+ void *cpu_addr;
+ /* len - buffer length */
+ u32 dmb_len;
+ /* idx - Index of this DMB on this receiving device */
+ u32 idx;
+ /* VLAN support (deprecated)
+ * In order to write into a vlan-tagged dmb, the remote device needs
+ * to belong to this vlan
+ */
+ u32 vlan_valid;
+ u32 vlan_id;
+ /* optional, used by device driver */
+ dma_addr_t dma_addr;
+};
+
+/* DIBS events
+ * -----------
+ * Dibs devices can optionally notify dibs clients about events that happened
+ * in the fabric or at the remote device or remote dmb.
+ */
+enum dibs_event_type {
+ /* Buffer event, e.g. a remote dmb was unregistered */
+ DIBS_BUF_EVENT,
+ /* Device event, e.g. a remote dibs device was disabled */
+ DIBS_DEV_EVENT,
+ /* Software event, a dibs client can send an event signal to a
+ * remote dibs device.
+ */
+ DIBS_SW_EVENT,
+ DIBS_OTHER_TYPE
+};
+
+enum dibs_event_subtype {
+ DIBS_BUF_UNREGISTERED,
+ DIBS_DEV_DISABLED,
+ DIBS_DEV_ERR_STATE,
+ DIBS_OTHER_SUBTYPE
+};
+
+struct dibs_event {
+ u32 type;
+ u32 subtype;
+ /* uuid_null if invalid */
+ uuid_t gid;
+ /* zero if invalid */
+ u64 buffer_tok;
+ u64 time;
+ /* additional data or zero */
+ u64 data;
+};
+
+struct dibs_dev;
+
+/* DIBS client
+ * -----------
+ */
+#define MAX_DIBS_CLIENTS 8
+#define NO_DIBS_CLIENT 0xff
+/* All dibs clients have access to all dibs devices.
+ * A dibs client provides the following functions to be called by dibs layer or
+ * dibs device drivers:
+ */
+struct dibs_client_ops {
+ /**
+ * add_dev() - add a dibs device
+ * @dev: device that was added
+ *
+ * Will be called during dibs_register_client() for all existing
+ * dibs devices and whenever a new dibs device is registered.
+ * dev is usable until dibs_client.remove() is called.
+ * *dev is protected by device refcounting.
+ */
+ void (*add_dev)(struct dibs_dev *dev);
+ /**
+ * del_dev() - remove a dibs device
+ * @dev: device to be removed
+ *
+ * Will be called during dibs_unregister_client() for all existing
+ * dibs devices and whenever a dibs device is unregistered.
+ * The device has already stopped initiative for this client:
+ * No new handlers will be started.
+ * The device is no longer usable by this client after this call.
+ */
+ void (*del_dev)(struct dibs_dev *dev);
+ /**
+ * handle_irq() - Handle signaling for a DMB
+ * @dev: device that owns the dmb
+ * @idx: Index of the dmb that got signalled
+ * @dmbemask: signaling mask of the dmb
+ *
+ * Handle signaling for a dmb that was registered by this client
+ * for this device.
+ * The dibs device can coalesce multiple signaling triggers into a
+ * single call of handle_irq(). dmbemask can be used to indicate
+ * different kinds of triggers.
+ *
+ * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_irq)(struct dibs_dev *dev, unsigned int idx,
+ u16 dmbemask);
+ /**
+ * handle_event() - Handle control information sent by device
+ * @dev: device reporting the event
+ * @event: dibs event structure
+ *
+ * Context: Called in IRQ context by dibs device driver
+ */
+ void (*handle_event)(struct dibs_dev *dev,
+ const struct dibs_event *event);
+};
+
+struct dibs_client {
+ /* client name for logging and debugging purposes */
+ const char *name;
+ const struct dibs_client_ops *ops;
+ /* client index - provided and used by dibs layer */
+ u8 id;
+};
+
+/* Functions to be called by dibs clients:
+ */
+/**
+ * dibs_register_client() - register a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->add_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_register_client(struct dibs_client *client);
+/**
+ * dibs_unregister_client() - unregister a client with dibs layer
+ * @client: this client
+ *
+ * Will call client->ops->del_dev() for all existing dibs devices.
+ * Return: zero on success.
+ */
+int dibs_unregister_client(struct dibs_client *client);
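+
+/*
+ * Registration sketch (illustrative; my_client_ops as in the sketch above):
+ *
+ *	static struct dibs_client my_client = {
+ *		.name	= "my_client",
+ *		.ops	= &my_client_ops,
+ *	};
+ *
+ *	rc = dibs_register_client(&my_client);
+ *	...
+ *	rc = dibs_unregister_client(&my_client);
+ */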
+
+/* dibs clients can call dibs device ops. */
+
+/* DIBS devices
+ * ------------
+ */
+
+/* Defined fabric id / CHID for all loopback devices:
+ * All dibs loopback devices report this fabric id. In this case devices with
+ * the same fabric id can NOT communicate via dibs. Only loopback devices with
+ * the same dibs device gid can communicate (i.e. the same device with itself).
+ */
+#define DIBS_LOOPBACK_FABRIC 0xFFFF
+
+/* A dibs device provides the following functions to be called by dibs clients.
+ * They are mandatory, unless marked 'optional'.
+ */
+struct dibs_dev_ops {
+ /**
+ * get_fabric_id()
+ * @dev: local dibs device
+ *
+ * Only devices on the same dibs fabric can communicate. Fabric_id is
+ * unique inside the same HW system. Use fabric_id for fast negative
+ * checks, but only query_remote_gid() can give a reliable positive
+ * answer:
+ * Different fabric_id: dibs is not possible
+ * Same fabric_id: dibs may be possible or not
+ * (e.g. different HW systems)
+	 * EXCEPTION: DIBS_LOOPBACK_FABRIC denotes a dibs loopback device
+	 *            that can only communicate with itself. Use dibs_dev.gid
+	 *            or query_remote_gid() to determine whether sender and
+	 *            receiver use the same dibs loopback device.
+ * Return: 2 byte dibs fabric id
+ */
+ u16 (*get_fabric_id)(struct dibs_dev *dev);
+ /**
+ * query_remote_gid()
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+ * @vid_valid: if zero, vid will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vid: VLAN id; deprecated, ignored if device does not support vlan
+ *
+ * Query whether a remote dibs device is reachable via this local device
+ * and this vlan id.
+ * Return: 0 if remote gid is reachable.
+ */
+ int (*query_remote_gid)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 vid_valid, u32 vid);
+ /**
+ * max_dmbs()
+ * Return: Max number of DMBs that can be registered for this kind of
+ * dibs_dev
+ */
+ int (*max_dmbs)(void);
+ /**
+ * register_dmb() - allocate and register a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be registered
+ * @client: dibs client
+ *
+ * The following fields of dmb must provide valid input:
+ * @rgid: gid of remote user device
+ * @dmb_len: buffer length
+	 * @idx: optionally the requested idx (if non-zero)
+ * @vlan_valid: if zero, vlan_id will be ignored;
+ * deprecated, ignored if device does not support vlan
+ * @vlan_id: deprecated, ignored if device does not support vlan
+	 * Upon successful return, the following additional fields will be valid:
+ * @dmb_tok: for usage by remote and local devices and clients
+ * @cpu_addr: allocated buffer
+ * @idx: dmb index, unique per dibs device
+	 * @dma_addr: to be used by device driver, if applicable
+ *
+ * Allocate a dmb buffer and register it with this device and for this
+ * client.
+ * Return: zero on success
+ */
+ int (*register_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb,
+ struct dibs_client *client);
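+	/*
+	 * Illustrative sketch (hypothetical caller code):
+	 *
+	 *	struct dibs_dmb dmb = {
+	 *		.rgid = *remote_gid,
+	 *		.dmb_len = 16384,
+	 *	};
+	 *
+	 *	rc = dev->ops->register_dmb(dev, &dmb, &my_client);
+	 *
+	 * On success dmb.dmb_tok, dmb.cpu_addr and dmb.idx are valid, and
+	 * dmb.dmb_tok can be shared with the remote peer.
+	 */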
+ /**
+ * unregister_dmb() - unregister and free a dmb
+ * @dev: dibs device
+ * @dmb: dmb struct to be unregistered
+ * The following fields of dmb must provide valid input:
+ * @dmb_tok
+ * @cpu_addr
+ * @idx
+ *
+ * Free dmb.cpu_addr and unregister the dmb from this device.
+ * Return: zero on success
+ */
+ int (*unregister_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * move_data() - write into a remote dmb
+ * @dev: Local sending dibs device
+ * @dmb_tok: Token of the remote dmb
+ * @idx: signaling index in dmbemask
+ * @sf: signaling flag;
+	 *      if true, bit idx will be turned on in the target dmbemask
+	 *      and the target device will be signaled.
+ * @offset: offset within target dmb
+ * @data: pointer to data to be sent
+ * @size: length of data to be sent, can be zero.
+ *
+ * Use dev to write data of size at offset into a remote dmb
+ * identified by dmb_tok. Data is moved synchronously, *data can
+ * be freed when this function returns.
+ *
+	 * If the signaling flag (sf) is true, bit number idx will be turned
+	 * on in the dmbemask when handle_irq() is called at the remote
+	 * dibs client that owns the target dmb. The target device may choose
+ * to coalesce the signaling triggers of multiple move_data() calls
+ * to the same target dmb into a single handle_irq() call.
+ * Return: zero on success
+ */
+ int (*move_data)(struct dibs_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
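+	/*
+	 * Illustrative sketch (hypothetical values): write buf into the
+	 * remote dmb and raise signaling bit 0:
+	 *
+	 *	rc = dev->ops->move_data(dev, dmb.dmb_tok, 0, true, 0,
+	 *				 buf, buf_len);
+	 *
+	 * The remote client's handle_irq() will eventually run with bit 0
+	 * set in dmbemask.
+	 */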
+ /**
+ * add_vlan_id() - add dibs device to vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ *
+ * In order to write into a vlan-tagged dmb, the remote device needs
+	 * to belong to this vlan. A device can belong to more than one vlan.
+ * Any device can access an untagged dmb.
+ * Deprecated, only supported for backwards compatibility.
+ * Return: zero on success
+ */
+ int (*add_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * del_vlan_id() - remove dibs device from vlan (optional, deprecated)
+ * @dev: dibs device
+ * @vlan_id: vlan id
+ * Return: zero on success
+ */
+ int (*del_vlan_id)(struct dibs_dev *dev, u64 vlan_id);
+ /**
+ * signal_event() - trigger an event at a remote dibs device (optional)
+ * @dev: local dibs device
+ * @rgid: gid of remote dibs device
+	 * @trigger_irq: zero: notification may be coalesced with other events
+	 *               non-zero: notify immediately
+	 * @event_code: 4 byte event code, meaning is defined by dibs client
+	 * @info: 8 bytes of additional information,
+	 *        meaning is defined by dibs client
+ *
+ * dibs devices can offer support for sending a control event of type
+	 * DIBS_SW_EVENT to a remote dibs device.
+ * NOTE: handle_event() will be called for all registered dibs clients
+ * at the remote device.
+ * Return: zero on success
+ */
+ int (*signal_event)(struct dibs_dev *dev, const uuid_t *rgid,
+ u32 trigger_irq, u32 event_code, u64 info);
+ /**
+ * support_mmapped_rdmb() - can this device provide memory mapped
+ * remote dmbs? (optional)
+ * @dev: dibs device
+ *
+ * A dibs device can provide a kernel address + length, that represent
+ * a remote target dmb (like MMIO). Alternatively to calling
+ * move_data(), a dibs client can write into such a ghost-send-buffer
+ * (= to this kernel address) and the data will automatically
+ * immediately appear in the target dmb, even without calling
+ * move_data().
+ *
+	 * Either all 3 function pointers for support_mmapped_rdmb(),
+ * attach_dmb() and detach_dmb() are defined, or all of them must
+ * be NULL.
+ *
+ * Return: non-zero, if memory mapped remote dmbs are supported.
+ */
+ int (*support_mmapped_rdmb)(struct dibs_dev *dev);
+ /**
+ * attach_dmb() - attach local memory to a remote dmb
+	 * @dev: Local sending dibs device
+	 * @dmb: all other parameters are passed in the form of a
+	 *	 dmb struct
+	 * TODO: (THIS IS CONFUSING, should be changed)
+	 * dmb_tok:  (in) Token of the remote dmb we want to attach to
+	 * cpu_addr: (out) MMIO address
+	 * dma_addr: (out) MMIO address (if applicable, invalid otherwise)
+	 * dmb_len:  (out) length of local MMIO region,
+	 *           equal to length of remote DMB.
+	 * idx:      (out) index of remote dmb (NOT HELPFUL, should be removed)
+ *
+ * Provides a memory address to the sender that can be used to
+ * directly write into the remote dmb.
+	 * Memory is available until detach_dmb() is called.
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*attach_dmb)(struct dibs_dev *dev, struct dibs_dmb *dmb);
+ /**
+ * detach_dmb() - Detach the ghost buffer from a remote dmb
+	 * @dev: dibs device
+ * @token: dmb token of the remote dmb
+ *
+ * No need to free cpu_addr.
+ *
+ * Return: Zero upon success, Error code otherwise
+ */
+ int (*detach_dmb)(struct dibs_dev *dev, u64 token);
+};
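+
+/*
+ * Ghost-send-buffer sketch (illustrative), valid only when
+ * support_mmapped_rdmb() reports support:
+ *
+ *	struct dibs_dmb rdmb = { .dmb_tok = remote_tok };
+ *
+ *	rc = dev->ops->attach_dmb(dev, &rdmb);
+ *	if (!rc) {
+ *		memcpy(rdmb.cpu_addr, data, len);
+ *		dev->ops->detach_dmb(dev, remote_tok);
+ *	}
+ *
+ * The memcpy() lands directly in the remote dmb; no move_data() call
+ * is needed.
+ */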
+
+struct dibs_dev {
+ struct list_head list;
+ struct device dev;
+ /* To be filled by device driver, before calling dibs_dev_add(): */
+ const struct dibs_dev_ops *ops;
+ uuid_t gid;
+ /* priv pointer for device driver */
+ void *drv_priv;
+
+ /* priv pointer per client; for client usage only */
+ void *priv[MAX_DIBS_CLIENTS];
+
+ /* get this lock before accessing any of the fields below */
+ spinlock_t lock;
+ /* array of client ids indexed by dmb idx;
+ * can be used as indices into priv and subs arrays
+ */
+ u8 *dmb_clientid_arr;
+	/* Sparse array of all dibs clients */
+ struct dibs_client *subs[MAX_DIBS_CLIENTS];
+};
+
+static inline void dibs_set_priv(struct dibs_dev *dev,
+ struct dibs_client *client, void *priv)
+{
+ dev->priv[client->id] = priv;
+}
+
+static inline void *dibs_get_priv(struct dibs_dev *dev,
+ struct dibs_client *client)
+{
+ return dev->priv[client->id];
+}
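+
+/*
+ * Typical usage sketch (illustrative): stash per-device client state in
+ * add_dev() and look it up later, e.g. in handle_irq():
+ *
+ *	static void my_add_dev(struct dibs_dev *dev)
+ *	{
+ *		struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);
+ *
+ *		dibs_set_priv(dev, &my_client, s);
+ *	}
+ *
+ *	struct my_state *s = dibs_get_priv(dev, &my_client);
+ */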
+
+/* ------- End of client-only functions ----------- */
+
+/* Functions to be called by dibs device drivers:
+ */
+/**
+ * dibs_dev_alloc() - allocate and reference device structure
+ *
+ * The following fields will be valid upon successful return: dev
+ * NOTE: Use put_device(dibs_get_dev(@dibs)) to give up your reference instead
+ * of freeing @dibs directly once you have successfully called this
+ * function.
+ * Return: Pointer to dibs device structure
+ */
+struct dibs_dev *dibs_dev_alloc(void);
+/**
+ * dibs_dev_add() - register with dibs layer and all clients
+ * @dibs: dibs device
+ *
+ * The following fields must be valid upon entry: dev, ops, drv_priv
+ * All fields will be valid upon successful return.
+ * Return: zero on success
+ */
+int dibs_dev_add(struct dibs_dev *dibs);
+/**
+ * dibs_dev_del() - unregister from dibs layer and all clients
+ * @dibs: dibs device
+ */
+void dibs_dev_del(struct dibs_dev *dibs);
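+
+/*
+ * Device driver lifecycle sketch (illustrative; error handling, ops
+ * implementation and gid assignment policy omitted):
+ *
+ *	dibs = dibs_dev_alloc();
+ *	dibs->ops = &my_dev_ops;
+ *	dibs->drv_priv = my_priv;
+ *	uuid_gen(&dibs->gid);
+ *	rc = dibs_dev_add(dibs);
+ *	...
+ *	dibs_dev_del(dibs);
+ *	put_device(&dibs->dev);
+ */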
+
+#endif /* _DIBS_H */
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index f3bc0bcd7098..c249912456f9 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -149,7 +149,5 @@ void dma_direct_free_pages(struct device *dev, size_t size,
struct page *page, dma_addr_t dma_addr,
enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
#endif /* _LINUX_DMA_DIRECT_H */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 332b80c42b6f..10882d00cb17 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -395,15 +395,15 @@ void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);
#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
-bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
-bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
+bool arch_dma_map_phys_direct(struct device *dev, phys_addr_t addr);
+bool arch_dma_unmap_phys_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
int nents);
#else
-#define arch_dma_map_page_direct(d, a) (false)
-#define arch_dma_unmap_page_direct(d, a) (false)
+#define arch_dma_map_phys_direct(d, a) (false)
+#define arch_dma_unmap_phys_direct(d, a) (false)
#define arch_dma_map_sg_direct(d, s, n) (false)
#define arch_dma_unmap_sg_direct(d, s, n) (false)
#endif
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 55c03e5fe8cb..8248ff9363ee 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -59,6 +59,26 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
+ * DMA_ATTR_MMIO - Indicates memory-mapped I/O (MMIO) region for DMA mapping
+ *
+ * This attribute indicates the physical address is not normal system
+ * memory. It may not be used with kmap*()/phys_to_virt()/phys_to_page()
+ * functions, it may not be cacheable, and access using CPU load/store
+ * instructions may not be allowed.
+ *
+ * Usually this will be used to describe MMIO addresses, or other non-cacheable
+ * register addresses. When DMA mapping this sort of address we call
+ * the operation Peer to Peer as one device is DMA'ing to another device.
+ * For PCI devices the p2pdma APIs must be used to determine if DMA_ATTR_MMIO
+ * is appropriate.
+ *
+ * For architectures that require cache flushing for DMA coherence
+ * DMA_ATTR_MMIO will not perform any cache flushing. The address
+ * provided must never be mapped cacheable into the CPU.
+ */
+#define DMA_ATTR_MMIO (1UL << 10)
+
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
* given device and there may be a translation between the CPU physical address
@@ -118,6 +138,10 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -172,6 +196,15 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
+static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ return DMA_MAPPING_ERROR;
+}
+static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
struct scatterlist *sg, int nents, enum dma_data_direction dir,
unsigned long attrs)
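
Example (sketch): mapping a peer device's MMIO BAR address for DMA with the
new phys-based helpers; dev, bar_phys and len are hypothetical:

	dma_addr_t addr;

	addr = dma_map_phys(dev, bar_phys, len, DMA_TO_DEVICE, DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, addr))
		return -EIO;
	/* program the peer-to-peer transfer using addr */
	dma_unmap_phys(dev, addr, len, DMA_TO_DEVICE, DMA_ATTR_MMIO);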
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
index fa1e76920d0e..25be745bf41f 100644
--- a/include/linux/dpll.h
+++ b/include/linux/dpll.h
@@ -38,6 +38,12 @@ struct dpll_device_ops {
void *dpll_priv,
enum dpll_feature_state *state,
struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_set)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack);
+ int (*phase_offset_avg_factor_get)(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack);
};
struct dpll_pin_ops {
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index d7d757e72554..c2d8b4ec62eb 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -492,7 +492,29 @@ struct ethtool_pause_stats {
};
#define ETHTOOL_MAX_LANES 8
+/*
+ * IEEE 802.3ck/df defines 16 bins for FEC histogram plus one more for
+ * the end-of-list marker, total 17 items
+ */
+#define ETHTOOL_FEC_HIST_MAX 17
+/**
+ * struct ethtool_fec_hist_range - error bits range for FEC histogram
+ * statistics
+ * @low: low bound of the bin (inclusive)
+ * @high: high bound of the bin (inclusive)
+ */
+struct ethtool_fec_hist_range {
+ u16 low;
+ u16 high;
+};
+struct ethtool_fec_hist {
+ struct ethtool_fec_hist_value {
+ u64 sum;
+ u64 per_lane[ETHTOOL_MAX_LANES];
+ } values[ETHTOOL_FEC_HIST_MAX];
+ const struct ethtool_fec_hist_range *ranges;
+};
/**
* struct ethtool_fec_stats - statistics for IEEE 802.3 FEC
* @corrected_blocks: number of received blocks corrected by FEC
@@ -968,6 +990,7 @@ struct kernel_ethtool_ts_info {
* @reset: Reset (part of) the device, as specified by a bitmask of
* flags from &enum ethtool_reset_flags. Returns a negative
* error code or zero.
+ * @get_rx_ring_count: Return the number of RX rings
* @get_rxfh_key_size: Get the size of the RX flow hash key.
* Returns zero if not supported for this specific device.
* @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
@@ -1162,6 +1185,7 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
+ u32 (*get_rx_ring_count)(struct net_device *dev);
u32 (*get_rxfh_key_size)(struct net_device *);
u32 (*get_rxfh_indir_size)(struct net_device *);
int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *);
@@ -1212,7 +1236,8 @@ struct ethtool_ops {
int (*set_link_ksettings)(struct net_device *,
const struct ethtool_link_ksettings *);
void (*get_fec_stats)(struct net_device *dev,
- struct ethtool_fec_stats *fec_stats);
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist);
int (*get_fecparam)(struct net_device *,
struct ethtool_fecparam *);
int (*set_fecparam)(struct net_device *,
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 3aac58a520c7..d0cf10d5e0f7 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -276,7 +276,7 @@ struct export_operations {
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
int (*permission)(struct handle_to_path_ctx *ctx, unsigned int oflags);
- struct file * (*open)(struct path *path, unsigned int oflags);
+ struct file * (*open)(const struct path *path, unsigned int oflags);
#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 2f8b8bfc0e73..6afb4a13b81d 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -79,6 +79,7 @@ enum stop_cp_reason {
STOP_CP_REASON_FLUSH_FAIL,
STOP_CP_REASON_NO_SEGMENT,
STOP_CP_REASON_CORRUPTED_FREE_BITMAP,
+ STOP_CP_REASON_CORRUPTED_NID,
STOP_CP_REASON_MAX,
};
diff --git a/include/linux/fbcon.h b/include/linux/fbcon.h
index 2382dec6d6ab..81f0e698acbf 100644
--- a/include/linux/fbcon.h
+++ b/include/linux/fbcon.h
@@ -1,6 +1,13 @@
#ifndef _LINUX_FBCON_H
#define _LINUX_FBCON_H
+#include <linux/compiler_types.h>
+
+struct fb_blit_caps;
+struct fb_info;
+struct fb_var_screeninfo;
+struct fb_videomode;
+
#ifdef CONFIG_FRAMEBUFFER_CONSOLE
void __init fb_console_init(void);
void __exit fb_console_exit(void);
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index d38c6e538e5c..6d208769d456 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -88,23 +88,30 @@ struct fw_card {
int node_id;
int generation;
- int current_tlabel;
- u64 tlabel_mask;
- struct list_head transaction_list;
u64 reset_jiffies;
- u32 split_timeout_hi;
- u32 split_timeout_lo;
- unsigned int split_timeout_cycles;
- unsigned int split_timeout_jiffies;
+ struct {
+ int current_tlabel;
+ u64 tlabel_mask;
+ struct list_head list;
+ spinlock_t lock;
+ } transactions;
+
+ struct {
+ u32 hi;
+ u32 lo;
+ unsigned int cycles;
+ unsigned int jiffies;
+ spinlock_t lock;
+ } split_timeout;
unsigned long long guid;
unsigned max_receive;
int link_speed;
int config_rom_generation;
- spinlock_t lock; /* Take this lock when handling the lists in
- * this struct. */
+ spinlock_t lock;
+
struct fw_node *local_node;
struct fw_node *root_node;
struct fw_node *irm_node;
@@ -115,8 +122,6 @@ struct fw_card {
int index;
struct list_head link;
- struct list_head phy_receiver_list;
-
struct delayed_work br_work; /* bus reset job */
bool br_short;
@@ -131,7 +136,11 @@ struct fw_card {
bool broadcast_channel_allocated;
u32 broadcast_channel;
- __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+ struct {
+ __be32 buffer[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+ spinlock_t lock;
+ } topology_map;
__be32 maint_utility_register;
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index 0f667bf1d4d9..a55ca771286b 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -175,4 +175,10 @@ static inline int qcom_scm_qseecom_app_send(u32 app_id,
#endif /* CONFIG_QCOM_QSEECOM */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type);
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type);
+
#endif
diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h
index b83b63a0c049..48ac0e5454c7 100644
--- a/include/linux/firmware/qcom/qcom_tzmem.h
+++ b/include/linux/firmware/qcom/qcom_tzmem.h
@@ -53,4 +53,19 @@ DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))
phys_addr_t qcom_tzmem_to_phys(void *ptr);
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle);
+void qcom_tzmem_shm_bridge_delete(u64 handle);
+#else
+static inline int qcom_tzmem_shm_bridge_create(phys_addr_t paddr,
+ size_t size, u64 *handle)
+{
+ return 0;
+}
+
+static inline void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+}
+#endif
+
#endif /* __QCOM_TZMEM */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index b303472255be..32884c9721e5 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -47,6 +47,7 @@ extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);
+extern void thaw_process(struct task_struct *p);
static inline bool try_to_freeze(void)
{
@@ -80,6 +81,7 @@ static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}
+static inline void thaw_process(struct task_struct *p) {}
static inline bool try_to_freeze(void) { return false; }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9e9d7c757efe..540004970ad5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -537,7 +537,7 @@ struct address_space {
/*
* Returns true if any of the pages in the mapping are marked with the tag.
*/
-static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag)
+static inline bool mapping_tagged(const struct address_space *mapping, xa_mark_t tag)
{
return xa_marked(&mapping->i_pages, tag);
}
@@ -585,7 +585,7 @@ static inline void i_mmap_assert_write_locked(struct address_space *mapping)
/*
* Might pages of this file be mapped into userspace?
*/
-static inline int mapping_mapped(struct address_space *mapping)
+static inline int mapping_mapped(const struct address_space *mapping)
{
return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root);
}
@@ -599,7 +599,7 @@ static inline int mapping_mapped(struct address_space *mapping)
* If i_mmap_writable is negative, no new writable mappings are allowed. You
* can only deny writable mappings, if none exists right now.
*/
-static inline int mapping_writably_mapped(struct address_space *mapping)
+static inline int mapping_writably_mapped(const struct address_space *mapping)
{
return atomic_read(&mapping->i_mmap_writable) > 0;
}
@@ -1192,6 +1192,8 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
* @f_cred: stashed credentials of creator/opener
* @f_owner: file owner
* @f_path: path of the file
+ * @__f_path: writable alias for @f_path; *ONLY* for core VFS and only before
+ *	the file gets opened
* @f_pos_lock: lock protecting file position
* @f_pipe: specific to pipes
* @f_pos: file position
@@ -1217,7 +1219,10 @@ struct file {
const struct cred *f_cred;
struct fown_struct *f_owner;
/* --- cacheline 1 boundary (64 bytes) --- */
- struct path f_path;
+ union {
+ const struct path f_path;
+ struct path __f_path;
+ };
union {
/* regular files (with FMODE_ATOMIC_POS) and directories */
struct mutex f_pos_lock;
@@ -1434,6 +1439,8 @@ struct sb_writers {
struct percpu_rw_semaphore rw_sem[SB_FREEZE_LEVELS];
};
+struct mount;
+
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
@@ -1468,7 +1475,7 @@ struct super_block {
__u16 s_encoding_flags;
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
- struct list_head s_mounts; /* list of mounts; _not_ for fs use */
+ struct mount *s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev; /* can go away once we use an accessor for @s_bdev_file */
struct file *s_bdev_file;
struct backing_dev_info *s_bdi;
@@ -2385,6 +2392,8 @@ static inline bool can_mmap_file(struct file *file)
return true;
}
+int __compat_vma_mmap_prepare(const struct file_operations *f_op,
+ struct file *file, struct vm_area_struct *vma);
int compat_vma_mmap_prepare(struct file *file, struct vm_area_struct *vma);
static inline int vfs_mmap(struct file *file, struct vm_area_struct *vma)
@@ -2873,7 +2882,7 @@ struct file *dentry_open_nonotify(const struct path *path, int flags,
const struct cred *cred);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct path *backing_file_user_path(const struct file *f);
+const struct path *backing_file_user_path(const struct file *f);
/*
* When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
@@ -3712,7 +3721,8 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
* happens when a directory is casefolded and the filesystem is strict
* about its encoding.
*/
-static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name)
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
{
if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb))
return true;
@@ -3727,18 +3737,42 @@ static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qst
return !utf8_validate(dir->i_sb->s_encoding, name);
}
#else
-static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name)
+static inline bool generic_ci_validate_strict_name(struct inode *dir,
+ const struct qstr *name)
{
return true;
}
#endif
+static inline struct unicode_map *sb_encoding(const struct super_block *sb)
+{
+#if IS_ENABLED(CONFIG_UNICODE)
+ return sb->s_encoding;
+#else
+ return NULL;
+#endif
+}
+
static inline bool sb_has_encoding(const struct super_block *sb)
{
+ return !!sb_encoding(sb);
+}
+
+/*
+ * Check whether two super blocks have the same encoding and flags.
+ */
+static inline bool sb_same_encoding(const struct super_block *sb1,
+ const struct super_block *sb2)
+{
#if IS_ENABLED(CONFIG_UNICODE)
- return !!sb->s_encoding;
+ if (sb1->s_encoding == sb2->s_encoding)
+ return true;
+
+ return (sb1->s_encoding && sb2->s_encoding &&
+ (sb1->s_encoding->version == sb2->s_encoding->version) &&
+ (sb1->s_encoding_flags == sb2->s_encoding_flags));
#else
- return false;
+ return true;
#endif
}
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 671f031be173..0d6c8a6d7be2 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -134,8 +134,13 @@ extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_ty
extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
-extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
- const char *value, size_t v_size);
+extern int vfs_parse_fs_qstr(struct fs_context *fc, const char *key,
+ const struct qstr *value);
+static inline int vfs_parse_fs_string(struct fs_context *fc, const char *key,
+ const char *value)
+{
+ return vfs_parse_fs_qstr(fc, key, value ? &QSTR(value) : NULL);
+}
int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
char *(*sep)(char **));
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b301bf7199d3..3601e25779ba 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -145,7 +145,6 @@ struct ptp_qoriq {
struct ptp_clock *clock;
struct ptp_clock_info caps;
struct resource *rsrc;
- struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
bool fiper3_support;
@@ -195,14 +194,5 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
-#ifdef CONFIG_DEBUG_FS
-void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
-void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
-#else
-static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq)
-{ }
-static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq)
-{ }
-#endif
#endif
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index d4034ddaf392..0d954ea7b179 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -273,6 +273,8 @@ struct fsnotify_group {
int f_flags; /* event_f_flags from fanotify_init() */
struct ucounts *ucounts;
mempool_t error_events_pool;
+ /* chained on perm_group_list */
+ struct list_head perm_grp_list;
} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5ebf26fcdcfa..0ceb4e09306c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -354,7 +354,7 @@ static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))
-struct page *alloc_pages_nolock_noprof(int nid, unsigned int order);
+struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))
extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
index 7ed3fdd55dda..45f181bcf890 100644
--- a/include/linux/habanalabs/cpucp_if.h
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -1425,9 +1425,13 @@ struct cpucp_monitor_dump {
* from "pkt_subidx" field in struct cpucp_packet.
*
* HL_PASSTHROUGHT_VERSIONS - Fetch all firmware versions.
+ * HL_GET_ERR_COUNTERS_CMD - Command to get error counters
+ * HL_GET_P_STATE - get performance state
*/
enum hl_passthrough_type {
HL_PASSTHROUGH_VERSIONS,
+ HL_GET_ERR_COUNTERS_CMD,
+ HL_GET_P_STATE,
};
#endif /* CPUCP_IF_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2cc4f1e4ea96..c32425b5d011 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -364,6 +364,7 @@ struct hid_item {
* | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
* | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
* | @HID_QUIRK_IGNORE_SPECIAL_DRIVER
+ * | @HID_QUIRK_POWER_ON_AFTER_BACKLIGHT
* | @HID_QUIRK_FULLSPEED_INTERVAL:
* | @HID_QUIRK_NO_INIT_REPORTS:
* | @HID_QUIRK_NO_IGNORE:
@@ -391,6 +392,7 @@ struct hid_item {
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
#define HID_QUIRK_NOINVERT BIT(21)
#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22)
+#define HID_QUIRK_POWER_ON_AFTER_BACKLIGHT BIT(23)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 36053c3d6d64..0574c21ca45d 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -7,7 +7,7 @@
*/
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
@@ -33,7 +33,7 @@ static inline void kmap_flush_tlb(unsigned long addr) { }
#endif
void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
+void kunmap_high(const struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);
@@ -50,7 +50,7 @@ static inline void *kmap(struct page *page)
return addr;
}
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
might_sleep();
if (!PageHighMem(page))
@@ -68,12 +68,12 @@ static inline void kmap_flush_unused(void)
__kmap_flush_unused();
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return __kmap_local_page_prot(page, kmap_prot);
}
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
if (!PageHighMem(page))
return page_address(page);
@@ -81,13 +81,13 @@ static inline void *kmap_local_page_try_from_panic(struct page *page)
return NULL;
}
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
- struct page *page = folio_page(folio, offset / PAGE_SIZE);
+ const struct page *page = folio_page(folio, offset / PAGE_SIZE);
return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return __kmap_local_page_prot(page, prot);
}
@@ -102,7 +102,7 @@ static inline void __kunmap_local(const void *vaddr)
kunmap_local_indexed(vaddr);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
@@ -113,7 +113,7 @@ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
return __kmap_local_page_prot(page, prot);
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
return kmap_atomic_prot(page, kmap_prot);
}
@@ -173,32 +173,32 @@ static inline void *kmap(struct page *page)
return page_address(page);
}
-static inline void kunmap_high(struct page *page) { }
+static inline void kunmap_high(const struct page *page) { }
static inline void kmap_flush_unused(void) { }
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(page_address(page));
#endif
}
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
{
return page_address(page);
}
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
return page_address(page);
}
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
return folio_address(folio) + offset;
}
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
return kmap_local_page(page);
}
@@ -215,7 +215,7 @@ static inline void __kunmap_local(const void *addr)
#endif
}
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
migrate_disable();
@@ -225,7 +225,7 @@ static inline void *kmap_atomic(struct page *page)
return page_address(page);
}
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
return kmap_atomic(page);
}
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6234f316468c..105cc4c00cc3 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -43,7 +43,7 @@ static inline void *kmap(struct page *page);
* Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
* pages in the low memory area.
*/
-static inline void kunmap(struct page *page);
+static inline void kunmap(const struct page *page);
/**
* kmap_to_page - Get the page for a kmap'ed address
@@ -93,7 +93,7 @@ static inline void kmap_flush_unused(void);
* disabling migration in order to keep the virtual address stable across
* preemption. No caller of kmap_local_page() can rely on this side effect.
*/
-static inline void *kmap_local_page(struct page *page);
+static inline void *kmap_local_page(const struct page *page);
/**
* kmap_local_folio - Map a page in this folio for temporary usage
@@ -129,7 +129,7 @@ static inline void *kmap_local_page(struct page *page);
* Context: Can be invoked from any context.
* Return: The virtual address of @offset.
*/
-static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
/**
* kmap_atomic - Atomically map a page for temporary usage - Deprecated!
@@ -176,7 +176,7 @@ static inline void *kmap_local_folio(struct folio *folio, size_t offset);
* kunmap_atomic(vaddr2);
* kunmap_atomic(vaddr1);
*/
-static inline void *kmap_atomic(struct page *page);
+static inline void *kmap_atomic(const struct page *page);
/* Highmem related interfaces for management code */
static inline unsigned long nr_free_highpages(void);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7748489fde1b..f327d62fc985 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -94,12 +94,15 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define THP_ORDERS_ALL \
(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)
-#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
-#define TVA_IN_PF (1 << 1) /* Page fault handler */
-#define TVA_ENFORCE_SYSFS (1 << 2) /* Obey sysfs configuration */
+enum tva_type {
+ TVA_SMAPS, /* Exposing "THPeligible:" in smaps. */
+ TVA_PAGEFAULT, /* Serving a page fault. */
+ TVA_KHUGEPAGED, /* Khugepaged collapse. */
+ TVA_FORCED_COLLAPSE, /* Forced collapse (e.g. MADV_COLLAPSE). */
+};
-#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
- (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
+#define thp_vma_allowable_order(vma, vm_flags, type, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, type, BIT(order)))
#define split_folio(f) split_folio_to_list(f, NULL)
@@ -264,14 +267,14 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
vm_flags_t vm_flags,
- unsigned long tva_flags,
+ enum tva_type type,
unsigned long orders);
/**
* thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
* @vma: the vm area to check
* @vm_flags: use these vm_flags instead of vma->vm_flags
- * @tva_flags: Which TVA flags to honour
+ * @type: TVA type
* @orders: bitfield of all orders to consider
*
* Calculates the intersection of the requested hugepage orders and the allowed
@@ -285,11 +288,14 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
vm_flags_t vm_flags,
- unsigned long tva_flags,
+ enum tva_type type,
unsigned long orders)
{
- /* Optimization to check if required orders are enabled early. */
- if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
+ /*
+ * Optimization to check if required orders are enabled early. Only
+ * forced collapse ignores sysfs configs.
+ */
+ if (type != TVA_FORCED_COLLAPSE && vma_is_anonymous(vma)) {
unsigned long mask = READ_ONCE(huge_anon_orders_always);
if (vm_flags & VM_HUGEPAGE)
@@ -303,7 +309,7 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
return 0;
}
- return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
+ return __thp_vma_allowable_orders(vma, vm_flags, type, orders);
}
struct thpsize {
@@ -318,16 +324,32 @@ struct thpsize {
(transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
+/*
+ * Check whether THPs are explicitly disabled for this VMA, for example,
+ * through madvise or prctl.
+ */
static inline bool vma_thp_disabled(struct vm_area_struct *vma,
- vm_flags_t vm_flags)
-{
+ vm_flags_t vm_flags, bool forced_collapse)
+{
+ /* Are THPs disabled for this VMA? */
+ if (vm_flags & VM_NOHUGEPAGE)
+ return true;
+ /* Are THPs disabled for all VMAs in the whole process? */
+ if (mm_flags_test(MMF_DISABLE_THP_COMPLETELY, vma->vm_mm))
+ return true;
/*
- * Explicitly disabled through madvise or prctl, or some
- * architectures may disable THP for some mappings, for
- * example, s390 kvm.
+ * Are THPs disabled only for VMAs where we didn't get an explicit
+	 * advice to use them?
*/
- return (vm_flags & VM_NOHUGEPAGE) ||
- test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
+ if (vm_flags & VM_HUGEPAGE)
+ return false;
+ /*
+	 * Forcing a collapse (e.g., MADV_COLLAPSE) is clear advice to
+ * use THPs.
+ */
+ if (forced_collapse)
+ return false;
+ return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm);
}
static inline bool thp_disabled_by_hw(void)
@@ -479,6 +501,8 @@ extern unsigned long huge_zero_pfn;
static inline bool is_huge_zero_folio(const struct folio *folio)
{
+ VM_WARN_ON_ONCE(!folio);
+
return READ_ONCE(huge_zero_folio) == folio;
}
@@ -495,6 +519,17 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);
+static inline struct folio *get_persistent_huge_zero_folio(void)
+{
+ if (!IS_ENABLED(CONFIG_PERSISTENT_HUGE_ZERO_FOLIO))
+ return NULL;
+
+ if (unlikely(!huge_zero_folio))
+ return NULL;
+
+ return huge_zero_folio;
+}
+
static inline bool thp_migration_supported(void)
{
return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
@@ -526,7 +561,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
vm_flags_t vm_flags,
- unsigned long tva_flags,
+ enum tva_type type,
unsigned long orders)
{
return 0;
@@ -553,22 +588,26 @@ static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
}
static inline int split_huge_page(struct page *page)
{
- return 0;
+ VM_WARN_ON_ONCE_PAGE(1, page);
+ return -EINVAL;
}
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
- return 0;
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
}
static inline int try_folio_split(struct folio *folio, struct page *page,
struct list_head *list)
{
- return 0;
+ VM_WARN_ON_ONCE_FOLIO(1, folio);
+ return -EINVAL;
}
static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
@@ -685,6 +724,11 @@ static inline int change_huge_pud(struct mmu_gather *tlb,
{
return 0;
}
+
+static inline struct folio *get_persistent_huge_zero_folio(void)
+{
+ return NULL;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline int split_folio_to_list_to_order(struct folio *folio,
@@ -698,4 +742,26 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}
+/**
+ * largest_zero_folio - Get the largest zero folio available
+ *
+ * This function shall be used when mm_get_huge_zero_folio() cannot be
+ * used because the caller has no appropriate mm lifetime to tie the
+ * huge zero folio to.
+ *
+ * Deduce the size of the returned folio with folio_size() instead of
+ * assuming a fixed folio size.
+ *
+ * Return: pointer to PMD sized zero folio if CONFIG_PERSISTENT_HUGE_ZERO_FOLIO
+ * is enabled or a single page sized zero folio
+ */
+static inline struct folio *largest_zero_folio(void)
+{
+ struct folio *folio = get_persistent_huge_zero_folio();
+
+ if (folio)
+ return folio;
+
+ return page_folio(ZERO_PAGE(0));
+}
#endif /* _LINUX_HUGE_MM_H */
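
Example (sketch): zero-checking a buffer in folio_size() sized chunks without
an mm to pin the huge zero folio against; buf and len are hypothetical:

	struct folio *zero = largest_zero_folio();
	size_t chunk = folio_size(zero); /* PMD-sized or PAGE_SIZE */

	while (len) {
		size_t n = min(len, chunk);

		if (memcmp(buf, folio_address(zero), n))
			return false;
		buf += n;
		len -= n;
	}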
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 526d27e88b3b..8e63e46b8e1f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -788,9 +788,14 @@ static inline unsigned huge_page_shift(struct hstate *h)
return h->order + PAGE_SHIFT;
}
+static inline bool order_is_gigantic(unsigned int order)
+{
+ return order > MAX_PAGE_ORDER;
+}
+
static inline bool hstate_is_gigantic(struct hstate *h)
{
- return huge_page_order(h) > MAX_PAGE_ORDER;
+ return order_is_gigantic(huge_page_order(h));
}
static inline unsigned int pages_per_huge_page(const struct hstate *h)
diff --git a/include/linux/hw_bitfield.h b/include/linux/hw_bitfield.h
new file mode 100644
index 000000000000..df202e167ce4
--- /dev/null
+++ b/include/linux/hw_bitfield.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2025, Collabora Ltd.
+ */
+
+#ifndef _LINUX_HW_BITFIELD_H
+#define _LINUX_HW_BITFIELD_H
+
+#include <linux/bitfield.h>
+#include <linux/build_bug.h>
+#include <linux/limits.h>
+
+/**
+ * FIELD_PREP_WM16() - prepare a bitfield element with a mask in the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16() masks and shifts up the value, as well as bitwise ORs the
+ * result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ */
+#define FIELD_PREP_WM16(_mask, _val) \
+ ({ \
+ typeof(_val) __val = _val; \
+ typeof(_mask) __mask = _mask; \
+ __BF_FIELD_CHECK(__mask, ((u16)0U), __val, \
+ "HWORD_UPDATE: "); \
+ (((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) | \
+ ((__mask) << 16); \
+ })
+
+/**
+ * FIELD_PREP_WM16_CONST() - prepare a constant bitfield element with a mask in
+ * the upper half
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP_WM16_CONST() masks and shifts up the value, as well as bitwise ORs
+ * the result with the mask shifted up by 16.
+ *
+ * This is useful for a common design of hardware registers where the upper
+ * 16-bit half of a 32-bit register is used as a write-enable mask. In such a
+ * register, a bit in the lower half is only updated if the corresponding bit
+ * in the upper half is high.
+ *
+ * Unlike FIELD_PREP_WM16(), this is a constant expression and can therefore
+ * be used in initializers. Error checking is less comfortable for this
+ * version.
+ */
+#define FIELD_PREP_WM16_CONST(_mask, _val) \
+ ( \
+ FIELD_PREP_CONST(_mask, _val) | \
+ (BUILD_BUG_ON_ZERO(const_true((u64)(_mask) > U16_MAX)) + \
+ ((_mask) << 16)) \
+ )
+
+#endif /* _LINUX_HW_BITFIELD_H */
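
Example (sketch): updating only bits [5:4] of a register whose upper 16 bits
are a write-enable mask; MY_REG and base are hypothetical:

	#define MY_REG_MODE	GENMASK(5, 4)

	/* writes 0x2 to bits [5:4]; bits [21:20] of the value enable the update */
	writel(FIELD_PREP_WM16(MY_REG_MODE, 0x2), base + MY_REG);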
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 20fd41b51d5c..11a19241e360 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -499,7 +499,7 @@ static inline struct i2c_client *i2c_verify_client(struct device *dev)
* Modules for add-on boards must use other calls.
*/
#ifdef CONFIG_I2C_BOARDINFO
-int
+int __init
i2c_register_board_info(int busnum, struct i2c_board_info const *info,
unsigned n);
#else
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 043f5c7ff398..c52a82dd79a6 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -558,6 +558,26 @@ struct i3c_master_controller {
#define i3c_bus_for_each_i3cdev(bus, dev) \
list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+/**
+ * struct i3c_dma - DMA transfer and mapping descriptor
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @map_len: length of DMA mapping
+ * @addr: mapped DMA address for a Host Controller Driver
+ * @dir: DMA direction
+ * @bounce_buf: an allocated bounce buffer if transfer needs it or NULL
+ */
+struct i3c_dma {
+ struct device *dev;
+ void *buf;
+ size_t len;
+ size_t map_len;
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ void *bounce_buf;
+};
+
int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
const struct i2c_msg *xfers,
int nxfers);
@@ -575,6 +595,12 @@ int i3c_master_get_free_addr(struct i3c_master_controller *master,
int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
u8 addr);
int i3c_master_do_daa(struct i3c_master_controller *master);
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *ptr,
+ size_t len, bool force_bounce,
+ enum dma_data_direction dir);
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer);
+DEFINE_FREE(i3c_master_dma_unmap_single, void *,
+ if (_T) i3c_master_dma_unmap_single(_T))
int i3c_master_set_info(struct i3c_master_controller *master,
const struct i3c_device_info *info);
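
Example (sketch): a host controller driver mapping a transfer buffer with the
new helper; hcdev, buf and len are hypothetical, and the NULL-on-failure
convention is an assumption:

	struct i3c_dma *dma;

	dma = i3c_master_dma_map_single(hcdev, buf, len, false,
					DMA_FROM_DEVICE);
	if (!dma)
		return -ENOMEM;
	/* program the controller with dma->addr and run the transfer */
	i3c_master_dma_unmap_single(dma);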
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 2267902d29a7..789e23e67444 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -334,14 +334,6 @@ static inline void ida_init(struct ida *ida)
xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
-/*
- * ida_simple_get() and ida_simple_remove() are deprecated. Use
- * ida_alloc() and ida_free() instead respectively.
- */
-#define ida_simple_get(ida, start, end, gfp) \
- ida_alloc_range(ida, start, (end) - 1, gfp)
-#define ida_simple_remove(ida, id) ida_free(ida, id)
-
static inline bool ida_is_empty(const struct ida *ida)
{
return xa_empty(&ida->xa);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index e5a2096e022e..ddff9102f633 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -220,6 +220,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_MAX_AID_S1G 8191
#define IEEE80211_MAX_TIM_LEN 251
#define IEEE80211_MAX_MESH_PEERINGS 63
+
+/* S1G encoding types */
+#define IEEE80211_S1G_TIM_ENC_MODE_BLOCK 0
+#define IEEE80211_S1G_TIM_ENC_MODE_SINGLE 1
+#define IEEE80211_S1G_TIM_ENC_MODE_OLB 2
+
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6.2.1.1.2.
@@ -1176,6 +1182,18 @@ enum ieee80211_s1g_chanwidth {
IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
};
+/**
+ * enum ieee80211_s1g_pri_chanwidth - S1G primary channel widths
+ * described in IEEE80211-2024 Table 10-39.
+ *
+ * @IEEE80211_S1G_PRI_CHANWIDTH_2MHZ: 2MHz primary channel
+ * @IEEE80211_S1G_PRI_CHANWIDTH_1MHZ: 1MHz primary channel
+ */
+enum ieee80211_s1g_pri_chanwidth {
+ IEEE80211_S1G_PRI_CHANWIDTH_2MHZ = 0,
+ IEEE80211_S1G_PRI_CHANWIDTH_1MHZ = 1,
+};
+
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
@@ -3164,8 +3182,12 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
-#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
+#define S1G_OPER_CH_WIDTH_PRIMARY BIT(0)
#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+#define S1G_OPER_CH_PRIMARY_LOCATION BIT(5)
+
+#define S1G_2M_PRIMARY_LOCATION_LOWER 0
+#define S1G_2M_PRIMARY_LOCATION_UPPER 1
/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */
#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01
@@ -4757,15 +4779,8 @@ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
return 1024 * tu;
}
-/**
- * ieee80211_check_tim - check if AID bit is set in TIM
- * @tim: the TIM IE
- * @tim_len: length of the TIM IE
- * @aid: the AID to look for
- * Return: whether or not traffic is indicated in the TIM for the given AID
- */
-static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
- u8 tim_len, u16 aid)
+static inline bool __ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
{
u8 mask;
u8 index, indexn1, indexn2;
@@ -4788,6 +4803,254 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
return !!(tim->virtual_map[index] & mask);
}
+struct s1g_tim_aid {
+ u16 aid;
+ u8 target_blk; /* Target block index */
+ u8 target_subblk; /* Target subblock index */
+ u8 target_subblk_bit; /* Target subblock bit */
+};
+
+struct s1g_tim_enc_block {
+ u8 enc_mode;
+ bool inverse;
+ const u8 *ptr;
+ u8 len;
+
+ /*
+ * For an OLB encoded block that spans multiple blocks, this
+ * is the offset into the span described by that encoded block.
+ */
+ u8 olb_blk_offset;
+};
+
+/*
+ * Helper routines to quickly extract the length of an encoded block. Validation
+ * is also performed to ensure the length extracted lies within the TIM.
+ */
+
+static inline int ieee80211_s1g_len_bitmap(const u8 *ptr, const u8 *end)
+{
+ u8 blkmap;
+ u8 n_subblks;
+
+ if (ptr >= end)
+ return -EINVAL;
+
+ blkmap = *ptr;
+ n_subblks = hweight8(blkmap);
+
+ if (ptr + 1 + n_subblks > end)
+ return -EINVAL;
+
+ return 1 + n_subblks;
+}
+
+static inline int ieee80211_s1g_len_single(const u8 *ptr, const u8 *end)
+{
+ return (ptr + 1 > end) ? -EINVAL : 1;
+}
+
+static inline int ieee80211_s1g_len_olb(const u8 *ptr, const u8 *end)
+{
+ if (ptr >= end)
+ return -EINVAL;
+
+ return (ptr + 1 + *ptr > end) ? -EINVAL : 1 + *ptr;
+}
+
+/*
+ * Enumerate all encoded blocks until we find the encoded block that describes
+ * our target AID. OLB is a special case as a single encoded block can describe
+ * multiple blocks as a single encoded block.
+ */
+static inline int ieee80211_s1g_find_target_block(struct s1g_tim_enc_block *enc,
+ const struct s1g_tim_aid *aid,
+ const u8 *ptr, const u8 *end)
+{
+ /* need at least block-control octet */
+ while (ptr + 1 <= end) {
+ u8 ctrl = *ptr++;
+ u8 mode = ctrl & 0x03;
+ bool contains, inverse = ctrl & BIT(2);
+ u8 span, blk_off = ctrl >> 3;
+ int len;
+
+ switch (mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ len = ieee80211_s1g_len_bitmap(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ len = ieee80211_s1g_len_single(ptr, end);
+ contains = blk_off == aid->target_blk;
+ break;
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ len = ieee80211_s1g_len_olb(ptr, end);
+ /*
+			 * An OLB encoded block can describe more than one
+			 * block, meaning a single encoded OLB block can span
+			 * multiple blocks.
+ */
+ if (len > 0) {
+ /* Minus one for the length octet */
+ span = DIV_ROUND_UP(len - 1, 8);
+ /*
+ * Check if our target block lies within the
+ * block span described by this encoded block.
+ */
+ contains = (aid->target_blk >= blk_off) &&
+ (aid->target_blk < blk_off + span);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (len < 0)
+ return len;
+
+ if (contains) {
+ enc->enc_mode = mode;
+ enc->inverse = inverse;
+ enc->ptr = ptr;
+ enc->len = (u8)len;
+ enc->olb_blk_offset = blk_off;
+ return 0;
+ }
+
+ ptr += len;
+ }
+
+ return -ENOENT;
+}
+
+static inline bool ieee80211_s1g_parse_bitmap(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blkmap = *ptr++;
+
+ /*
+ * If our block bitmap does not contain a set bit that corresponds
+	 * to our AID, the meaning depends on whether the encoding mode
+	 * is inverted:
+ *
+ * 1. If inverted, it means the entire subblock is present and hence
+ * our AID has been set.
+ * 2. If not inverted, it means our subblock is not present and hence
+ * it is all zero meaning our AID is not set.
+ */
+ if (!(blkmap & BIT(aid->target_subblk)))
+ return enc->inverse;
+
+ /*
+ * Increment ptr by the number of set subblocks that appear before our
+ * target subblock. If our target subblock is 0, do nothing as ptr
+ * already points to our target subblock.
+ */
+ if (aid->target_subblk)
+ ptr += hweight8(blkmap & GENMASK(aid->target_subblk - 1, 0));
+
+ return !!(*ptr & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_single(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ /*
+ * Single AID mode describes, as the name suggests, a single AID
+ * within the block described by the encoded block. The octet
+ * contains the 6 LSBs of the AID described in the block. The other
+	 * 2 bits are reserved. When inverted, every AID described by the
+	 * current block has buffered traffic except for the AID
+ * described in the single AID octet.
+ */
+ return ((*enc->ptr & 0x3f) == (aid->aid & 0x3f)) ^ enc->inverse;
+}
+
+static inline bool ieee80211_s1g_parse_olb(struct s1g_tim_enc_block *enc,
+ struct s1g_tim_aid *aid)
+{
+ const u8 *ptr = enc->ptr;
+ u8 blk_len = *ptr++;
+ /*
+ * Given an OLB encoded block that describes multiple blocks,
+ * calculate the offset into the span. Then calculate the
+ * subblock location normally.
+ */
+ u16 span_offset = aid->target_blk - enc->olb_blk_offset;
+ u16 subblk_idx = span_offset * 8 + aid->target_subblk;
+
+ if (subblk_idx >= blk_len)
+ return enc->inverse;
+
+ return !!(ptr[subblk_idx] & BIT(aid->target_subblk_bit)) ^ enc->inverse;
+}
+
+/*
+ * An S1G PVB has 3 mandatory encoding types, each of which can be inverted.
+ * An S1G PVB is constructed from zero or more encoded block subfields. Each
+ * encoded block represents a single "block" of 64 AIDs, and each encoded
+ * block contains one of the 3 encoding types alongside a single bit for
+ * whether the bits should be inverted.
+ *
+ * As the standard makes no guarantee about the ordering of encoded blocks,
+ * in the worst case we must parse every encoded block, e.g. when the
+ * target AID lies within the last encoded block.
+ */
+static inline bool ieee80211_s1g_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ int err;
+ struct s1g_tim_aid target_aid;
+ struct s1g_tim_enc_block enc_blk;
+
+ if (tim_len < 3)
+ return false;
+
+ target_aid.aid = aid;
+ target_aid.target_blk = (aid >> 6) & 0x1f;
+ target_aid.target_subblk = (aid >> 3) & 0x7;
+ target_aid.target_subblk_bit = aid & 0x7;
+
+ /*
+ * Find the encoded block that covers our AID and fill &enc_blk
+ * with its information. If no entry is found or an error occurs,
+ * return false.
+ */
+ err = ieee80211_s1g_find_target_block(&enc_blk, &target_aid,
+ tim->virtual_map,
+ (const u8 *)tim + tim_len + 2);
+ if (err)
+ return false;
+
+ switch (enc_blk.enc_mode) {
+ case IEEE80211_S1G_TIM_ENC_MODE_BLOCK:
+ return ieee80211_s1g_parse_bitmap(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_SINGLE:
+ return ieee80211_s1g_parse_single(&enc_blk, &target_aid);
+ case IEEE80211_S1G_TIM_ENC_MODE_OLB:
+ return ieee80211_s1g_parse_olb(&enc_blk, &target_aid);
+ default:
+ return false;
+ }
+}
+
+/**
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ * @s1g: whether the TIM is from an S1G PPDU
+ * Return: whether or not traffic is indicated in the TIM for the given AID
+ */
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid, bool s1g)
+{
+ return s1g ? ieee80211_s1g_check_tim(tim, tim_len, aid) :
+ __ieee80211_check_tim(tim, tim_len, aid);
+}
+
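For orientation, here is a minimal sketch (AID value hypothetical) of the block/subblock decomposition that ieee80211_s1g_check_tim() performs before searching the PVB:

	u16 aid = 715;			/* 0b1011001011 */
	u8 blk    = (aid >> 6) & 0x1f;	/* 11: 64-AID block index */
	u8 subblk = (aid >> 3) & 0x7;	/*  1: 8-AID subblock within the block */
	u8 bit    = aid & 0x7;		/*  3: bit within the subblock octet */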
/**
* ieee80211_get_tdls_action - get TDLS action code
* @skb: the skb containing the frame, length will not be checked
@@ -5818,4 +6081,21 @@ static inline u32 ieee80211_eml_trans_timeout_in_us(u16 eml_cap)
_data + ieee80211_mle_common_size(_data),\
_len - ieee80211_mle_common_size(_data))
+/* NAN operation mode, as defined in Wi-Fi Aware (TM) specification Table 81 */
+#define NAN_OP_MODE_PHY_MODE_VHT 0x01
+#define NAN_OP_MODE_PHY_MODE_HE 0x10
+#define NAN_OP_MODE_PHY_MODE_MASK 0x11
+#define NAN_OP_MODE_80P80MHZ 0x02
+#define NAN_OP_MODE_160MHZ 0x04
+#define NAN_OP_MODE_PNDL_SUPPRTED 0x08
+
+/* NAN Device capabilities, as defined in Wi-Fi Aware (TM) specification
+ * Table 79
+ */
+#define NAN_DEV_CAPA_DFS_OWNER 0x01
+#define NAN_DEV_CAPA_EXT_KEY_ID_SUPPORTED 0x02
+#define NAN_DEV_CAPA_SIM_NDP_RX_SUPPORTED 0x04
+#define NAN_DEV_CAPA_NDPE_SUPPORTED 0x08
+#define NAN_DEV_CAPA_S3_SUPPORTED 0x10
+
#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ff3beda1312c..db45d6f1c4f4 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -43,7 +43,7 @@ struct pppox_sock {
/* struct sock must be the first member of pppox_sock */
struct sock sk;
struct ppp_channel chan;
- struct pppox_sock *next; /* for hash table */
+ struct pppox_sock __rcu *next; /* for hash table */
union {
struct pppoe_opt pppoe;
struct pptp_opt pptp;
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index a9033696b0aa..704fd415c2b4 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -24,9 +24,6 @@ struct inet_diag_handler {
bool net_admin,
struct sk_buff *skb);
- size_t (*idiag_get_aux_size)(struct sock *sk,
- bool net_admin);
-
int (*destroy)(struct sk_buff *in_skb,
const struct inet_diag_req_v2 *req);
@@ -41,6 +38,11 @@ struct inet_diag_dump_data {
#define inet_diag_nla_bpf_stgs req_nlas[INET_DIAG_REQ_SK_BPF_STORAGES]
struct bpf_sk_storage_diag *bpf_stg_diag;
+ bool mark_needed; /* INET_DIAG_BC_MARK_COND present. */
+#ifdef CONFIG_SOCK_CGROUP_DATA
+ bool cgroup_needed; /* INET_DIAG_BC_CGROUP_COND present. */
+#endif
+ bool userlocks_needed; /* INET_DIAG_BC_AUTO present. */
};
struct inet_connection_sock;
@@ -48,18 +50,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
u16 nlmsg_flags, bool net_admin);
-void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *r);
-int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
- struct netlink_callback *cb,
- const struct inet_diag_req_v2 *req);
-
-struct sock *inet_diag_find_one_icsk(struct net *net,
- struct inet_hashinfo *hashinfo,
- const struct inet_diag_req_v2 *req);
-int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+int inet_diag_bc_sk(const struct inet_diag_dump_data *cb_data, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 138fbd89b1e6..8a823c6f2b4a 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -180,6 +180,7 @@ struct io_pgtable_cfg {
struct {
u64 ttbr[4];
u32 n_ttbrs;
+ u32 n_levels;
} apple_dart_cfg;
struct {
diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h
index cfa6d0c0c322..7509025b4071 100644
--- a/include/linux/io_uring/cmd.h
+++ b/include/linux/io_uring/cmd.h
@@ -11,11 +11,14 @@
/* io_uring_cmd is being issued again */
#define IORING_URING_CMD_REISSUE (1U << 31)
+typedef void (*io_uring_cmd_tw_t)(struct io_uring_cmd *cmd,
+ unsigned issue_flags);
+
struct io_uring_cmd {
struct file *file;
const struct io_uring_sqe *sqe;
/* callback to defer completions to task context */
- void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
+ io_uring_cmd_tw_t task_work_cb;
u32 cmd_op;
u32 flags;
u8 pdu[32]; /* available inline for free use */
@@ -53,11 +56,11 @@ int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
* Note: the caller should never hard code @issue_flags and is only allowed
* to pass the mask provided by the core io_uring code.
*/
-void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
- unsigned issue_flags);
+void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret, u64 res2,
+ unsigned issue_flags, bool is_cqe32);
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned),
+ io_uring_cmd_tw_t task_work_cb,
unsigned flags);
/*
@@ -70,6 +73,21 @@ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);
+/*
+ * Select a buffer from the provided buffer group for multishot uring_cmd.
+ * Returns the selected buffer address and size.
+ */
+struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd,
+ unsigned buf_group, size_t *len,
+ unsigned int issue_flags);
+
+/*
+ * Complete a multishot uring_cmd event. This will post a CQE to the completion
+ * queue and update the provided buffer.
+ */
+bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags);
+
#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
@@ -86,13 +104,12 @@ static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
{
return -EOPNOTSUPP;
}
-static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
- u64 ret2, unsigned issue_flags)
+static inline void __io_uring_cmd_done(struct io_uring_cmd *cmd, s32 ret,
+ u64 ret2, unsigned issue_flags, bool is_cqe32)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned),
- unsigned flags)
+ io_uring_cmd_tw_t task_work_cb, unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
@@ -102,28 +119,28 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
-#endif
-
-/*
- * Polled completions must ensure they are coming from a poll queue, and
- * hence are completed inside the usual poll handling loops.
- */
-static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
- ssize_t ret, ssize_t res2)
+static inline struct io_br_sel
+io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group,
+ size_t *len, unsigned int issue_flags)
+{
+ return (struct io_br_sel) { .val = -EOPNOTSUPP };
+}
+static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd,
+ struct io_br_sel *sel, unsigned int issue_flags)
{
- lockdep_assert(in_task());
- io_uring_cmd_done(ioucmd, ret, res2, 0);
+ return true;
}
+#endif
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ io_uring_cmd_tw_t task_work_cb)
{
__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}
static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
- void (*task_work_cb)(struct io_uring_cmd *, unsigned))
+ io_uring_cmd_tw_t task_work_cb)
{
__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
@@ -142,6 +159,18 @@ static inline void *io_uring_cmd_ctx_handle(struct io_uring_cmd *cmd)
return cmd_to_io_kiocb(cmd)->ctx;
}
+static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
+ unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
+}
+
+static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,
+ u64 res2, unsigned issue_flags)
+{
+ return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, true);
+}
+
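As a usage sketch (driver callback name hypothetical), a driver typically pairs the task-work helpers with these wrappers:

	static void my_cmd_tw(struct io_uring_cmd *cmd, unsigned issue_flags)
	{
		/* back in task context: post the CQE with result 0 */
		io_uring_cmd_done(cmd, 0, issue_flags);
	}

	/* from IRQ or another non-task context: */
	io_uring_cmd_complete_in_task(cmd, my_cmd_tw);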
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
void (*release)(void *), unsigned int index,
unsigned int issue_flags);
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 12f5ee43850e..c2ea6280901d 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -86,6 +86,25 @@ struct io_mapped_region {
};
/*
+ * Return value from io_buffer_list selection, to avoid stashing it in
+ * struct io_kiocb. For legacy/classic provided buffers, keeping a reference
+ * across execution contexts are fine. But for ring provided buffers, the
+ * list may go away as soon as ->uring_lock is dropped. As the io_kiocb
+ * persists, it's better to just keep the buffer local for those cases.
+ */
+struct io_br_sel {
+ struct io_buffer_list *buf_list;
+ /*
+ * Some selection parts return the user address, others return an error.
+ */
+ union {
+ void __user *addr;
+ ssize_t val;
+ };
+};
+
+/*
* Arbitrary limit, can be raised if need be
*/
#define IO_RINGFD_REG_MAX 16
@@ -671,12 +690,6 @@ struct io_kiocb {
/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;
- /*
- * stores buffer ID for ring provided buffers, valid IFF
- * REQ_F_BUFFER_RING is set.
- */
- struct io_buffer_list *buf_list;
-
struct io_rsrc_node *buf_node;
};
@@ -724,10 +737,4 @@ struct io_overflow_cqe {
struct list_head list;
struct io_uring_cqe cqe;
};
-
-static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx)
-{
- return ctx->flags & IORING_SETUP_CQE32;
-}
-
#endif
diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h
index 508beaa44c39..a92b3ff9b934 100644
--- a/include/linux/iommu-dma.h
+++ b/include/linux/iommu-dma.h
@@ -21,10 +21,9 @@ static inline bool use_dma_iommu(struct device *dev)
}
#endif /* CONFIG_IOMMU_DMA */
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
@@ -43,10 +42,6 @@ size_t iommu_dma_opt_mapping_size(void);
size_t iommu_dma_max_mapping_size(struct device *dev);
void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, unsigned long attrs);
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 91324c331a4b..bdd2e0652bc3 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -14,62 +14,64 @@
#include <linux/io.h>
/**
- * read_poll_timeout - Periodically poll an address until a condition is
- * met or a timeout occurs
- * @op: accessor function (takes @args as its arguments)
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
- * read usleep_range() function description for details and
+ * poll_timeout_us - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
+ *
+ * @op: Operation
+ * @cond: Break condition
+ * @sleep_us: Maximum time to sleep between operations in us (0 tight-loops).
+ * Please read usleep_range() function description for details and
* limitations.
* @timeout_us: Timeout in us, 0 means never timeout
- * @sleep_before_read: if it is true, sleep @sleep_us before read.
- * @args: arguments for @op poll
+ * @sleep_before_op: if true, sleep @sleep_us before the first operation.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*
- * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val. Must not
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*/
-#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
- sleep_before_read, args...) \
+#define poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_op) \
({ \
u64 __timeout_us = (timeout_us); \
unsigned long __sleep_us = (sleep_us); \
ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int ___ret; \
might_sleep_if((__sleep_us) != 0); \
- if (sleep_before_read && __sleep_us) \
+ if ((sleep_before_op) && __sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
for (;;) { \
- (val) = op(args); \
- if (cond) \
+ bool __expired = __timeout_us && \
+ ktime_compare(ktime_get(), __timeout) > 0; \
+ /* guarantee 'op' and 'cond' are evaluated after the expiry check */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (__timeout_us && \
- ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = op(args); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
if (__sleep_us) \
usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
cpu_relax(); \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
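A brief usage sketch (register and flag names hypothetical); since @op is now an arbitrary statement, it is no longer limited to a single accessor call:

	u32 sts;
	int err;

	/* evaluate the op roughly every 100us, give up after 10ms */
	err = poll_timeout_us(sts = readl(base + REG_STATUS),
			      sts & STATUS_READY, 100, 10000, false);
	if (err)
		return err;	/* -ETIMEDOUT */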
/**
- * read_poll_timeout_atomic - Periodically poll an address until a condition is
- * met or a timeout occurs
- * @op: accessor function (takes @args as its arguments)
- * @val: Variable to read the value into
- * @cond: Break condition (usually involving @val)
- * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
- * read udelay() function description for details and
+ * poll_timeout_us_atomic - Periodically poll and perform an operation until
+ * a condition is met or a timeout occurs
+ *
+ * @op: Operation
+ * @cond: Break condition
+ * @delay_us: Time to udelay between operations in us (0 tight-loops).
+ * Please read udelay() function description for details and
* limitations.
* @timeout_us: Timeout in us, 0 means never timeout
- * @delay_before_read: if it is true, delay @delay_us before read.
- * @args: arguments for @op poll
+ * @delay_before_op: if true, delay @delay_us before the first operation.
*
* This macro does not rely on timekeeping. Hence it is safe to call even when
* timekeeping is suspended, at the expense of an underestimation of wall clock
@@ -78,27 +80,32 @@
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*
- * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
- * case, the last read value at @args is stored in @val.
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout.
*/
-#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
- delay_before_read, args...) \
+#define poll_timeout_us_atomic(op, cond, delay_us, timeout_us, \
+ delay_before_op) \
({ \
u64 __timeout_us = (timeout_us); \
s64 __left_ns = __timeout_us * NSEC_PER_USEC; \
unsigned long __delay_us = (delay_us); \
u64 __delay_ns = __delay_us * NSEC_PER_USEC; \
- if (delay_before_read && __delay_us) { \
+ int ___ret; \
+ if ((delay_before_op) && __delay_us) { \
udelay(__delay_us); \
if (__timeout_us) \
__left_ns -= __delay_ns; \
} \
for (;;) { \
- (val) = op(args); \
- if (cond) \
+ bool __expired = __timeout_us && __left_ns < 0; \
+ /* guarantee 'op' and 'cond' are evaluated after the expiry check */ \
+ barrier(); \
+ op; \
+ if (cond) { \
+ ___ret = 0; \
break; \
- if (__timeout_us && __left_ns < 0) { \
- (val) = op(args); \
+ } \
+ if (__expired) { \
+ ___ret = -ETIMEDOUT; \
break; \
} \
if (__delay_us) { \
@@ -110,10 +117,61 @@
if (__timeout_us) \
__left_ns--; \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ ___ret; \
})
/**
+ * read_poll_timeout - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please
+ * read usleep_range() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if it is true, sleep @sleep_us before read.
+ * @args: arguments for @op poll
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
+ poll_timeout_us((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+
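The wrapper keeps its original calling convention on top of poll_timeout_us(); a sketch with the same hypothetical register:

	u32 val;
	int ret = read_poll_timeout(readl, val, val & BIT(0), 100, 10000,
				    false, base + REG_STATUS);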
+/**
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ * met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Please
+ * read udelay() function description for details and
+ * limitations.
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if it is true, delay @delay_us before read.
+ * @args: arguments for @op poll
+ *
+ * This macro does not rely on timekeeping. Hence it is safe to call even when
+ * timekeeping is suspended, at the expense of an underestimation of wall clock
+ * time, which is rather minimal with a non-zero delay_us.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ *
+ * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val.
+ */
+#define read_poll_timeout_atomic(op, val, cond, sleep_us, timeout_us, \
+ sleep_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+
+/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
* @op: accessor function (takes @addr as its only argument)
* @addr: Address to poll
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 5d69820d8b02..892e2d656e1e 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -109,8 +109,9 @@ struct ipmi_smi_msg {
enum ipmi_smi_msg_type type;
- long msgid;
- void *user_data;
+ long msgid;
+ /* Response to this message, will be NULL if not from a user request. */
+ struct ipmi_recv_msg *recv_msg;
int data_size;
unsigned char data[IPMI_MAX_MSG_LENGTH];
@@ -168,9 +169,11 @@ struct ipmi_smi_handlers {
 * are held when this is run. Messages are delivered one at
 * a time by the message handler; a new message will not be
 * delivered until the previous message is returned.
+ *
+ * This can return an error if the SMI is not in a state where it
+ * can send a message.
*/
- void (*sender)(void *send_info,
- struct ipmi_smi_msg *msg);
+ int (*sender)(void *send_info, struct ipmi_smi_msg *msg);
/*
* Called by the upper layer to request that we try to get
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index bc6ec2959173..43b7bb828738 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -214,18 +214,21 @@ struct inet6_cork {
/* struct ipv6_pinfo - ipv6 private area */
struct ipv6_pinfo {
+ /* Used in tx path (inet6_csk_route_socket(), ip6_xmit()) */
struct in6_addr saddr;
- struct in6_pktinfo sticky_pktinfo;
- const struct in6_addr *daddr_cache;
+ __be32 flow_label;
+ u32 dst_cookie;
+ struct ipv6_txoptions __rcu *opt;
+ s16 hop_limit;
+ u8 pmtudisc;
+ u8 tclass;
#ifdef CONFIG_IPV6_SUBTREES
- const struct in6_addr *saddr_cache;
+ bool saddr_cache;
#endif
+ bool daddr_cache;
- __be32 flow_label;
- __u32 frag_size;
-
- s16 hop_limit;
u8 mcast_hops;
+ u32 frag_size;
int ucast_oif;
int mcast_oif;
@@ -233,7 +236,7 @@ struct ipv6_pinfo {
/* pktoption flags */
union {
struct {
- __u16 srcrt:1,
+ u16 srcrt:1,
osrcrt:1,
rxinfo:1,
rxoinfo:1,
@@ -250,29 +253,25 @@ struct ipv6_pinfo {
recvfragsize:1;
/* 1 bits hole */
} bits;
- __u16 all;
+ u16 all;
} rxopt;
/* sockopt flags */
- __u8 srcprefs; /* 001: prefer temporary address
+ u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- __u8 pmtudisc;
- __u8 min_hopcount;
- __u8 tclass;
+ u8 min_hopcount;
__be32 rcv_flowinfo;
+ struct in6_pktinfo sticky_pktinfo;
- __u32 dst_cookie;
+ struct sk_buff *pktoptions;
+ struct sk_buff *rxpmtu;
+ struct inet6_cork cork;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
struct ipv6_fl_socklist __rcu *ipv6_fl_list;
-
- struct ipv6_txoptions __rcu *opt;
- struct sk_buff *pktoptions;
- struct sk_buff *rxpmtu;
- struct inet6_cork cork;
};
/* We currently use available bits from inet_sk(sk)->inet_flags,
@@ -295,7 +294,7 @@ struct raw6_sock {
__u32 offset; /* checksum offset */
struct icmp6_filter filter;
__u32 ip6mr_table;
-
+ struct numa_drop_counters drop_counters;
struct ipv6_pinfo inet6;
};
diff --git a/include/linux/ism.h b/include/linux/ism.h
index 8358b4cd7ba6..b7feb4dcd5a8 100644
--- a/include/linux/ism.h
+++ b/include/linux/ism.h
@@ -11,17 +11,6 @@
#include <linux/workqueue.h>
-struct ism_dmb {
- u64 dmb_tok;
- u64 rgid;
- u32 dmb_len;
- u32 sba_idx;
- u32 vlan_valid;
- u32 vlan_id;
- void *cpu_addr;
- dma_addr_t dma_addr;
-};
-
/* Unless we gain unexpected popularity, this limit should hold for a while */
#define MAX_CLIENTS 8
#define ISM_NR_DMBS 1920
@@ -30,19 +19,17 @@ struct ism_dev {
spinlock_t lock; /* protects the ism device */
spinlock_t cmd_lock; /* serializes cmds */
struct list_head list;
+ struct dibs_dev *dibs;
struct pci_dev *pdev;
struct ism_sba *sba;
dma_addr_t sba_dma_addr;
DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
- u8 *sba_client_arr; /* entries are indices into 'clients' array */
void *priv[MAX_CLIENTS];
struct ism_eq *ieq;
dma_addr_t ieq_dma_addr;
- struct device dev;
- u64 local_gid;
int ieq_idx;
struct ism_client *subs[MAX_CLIENTS];
@@ -58,14 +45,7 @@ struct ism_event {
struct ism_client {
const char *name;
- void (*add)(struct ism_dev *dev);
- void (*remove)(struct ism_dev *dev);
void (*handle_event)(struct ism_dev *dev, struct ism_event *event);
- /* Parameter dmbemask contains a bit vector with updated DMBEs, if sent
- * via ism_move_data(). Callback function must handle all active bits
- * indicated by dmbemask.
- */
- void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
/* Private area - don't touch! */
u8 id;
};
@@ -82,12 +62,6 @@ static inline void ism_set_priv(struct ism_dev *dev, struct ism_client *client,
dev->priv[client->id] = priv;
}
-int ism_register_dmb(struct ism_dev *dev, struct ism_dmb *dmb,
- struct ism_client *client);
-int ism_unregister_dmb(struct ism_dev *dev, struct ism_dmb *dmb);
-int ism_move(struct ism_dev *dev, u64 dmb_tok, unsigned int idx, bool sf,
- unsigned int offset, void *data, unsigned int size);
-
const struct smcd_ops *ism_get_smcd_ops(void);
#endif /* _ISM_H */
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
index 6f612d69ea0c..9eca967d8526 100644
--- a/include/linux/kasan-enabled.h
+++ b/include/linux/kasan-enabled.h
@@ -4,32 +4,46 @@
#include <linux/static_key.h>
-#ifdef CONFIG_KASAN_HW_TAGS
-
+#if defined(CONFIG_ARCH_DEFER_KASAN) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * Global runtime flag for KASAN modes that need runtime control.
+ * Used by ARCH_DEFER_KASAN architectures and HW_TAGS mode.
+ */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
+/*
+ * Runtime control for shadow memory initialization or HW_TAGS mode.
+ * Uses static key for architectures that need deferred KASAN or HW_TAGS.
+ */
static __always_inline bool kasan_enabled(void)
{
return static_branch_likely(&kasan_flag_enabled);
}
-static inline bool kasan_hw_tags_enabled(void)
+static inline void kasan_enable(void)
{
- return kasan_enabled();
+ static_branch_enable(&kasan_flag_enabled);
}
-
-#else /* CONFIG_KASAN_HW_TAGS */
-
-static inline bool kasan_enabled(void)
+#else
+/* For architectures that can enable KASAN early, use compile-time check. */
+static __always_inline bool kasan_enabled(void)
{
return IS_ENABLED(CONFIG_KASAN);
}
+static inline void kasan_enable(void) {}
+#endif /* CONFIG_ARCH_DEFER_KASAN || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+static inline bool kasan_hw_tags_enabled(void)
+{
+ return kasan_enabled();
+}
+#else
static inline bool kasan_hw_tags_enabled(void)
{
return false;
}
-
#endif /* CONFIG_KASAN_HW_TAGS */
#endif /* LINUX_KASAN_ENABLED_H */
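A minimal sketch, assuming a hypothetical architecture hook, of how a deferring architecture would flip the static key once its shadow memory is ready:

	void __init arch_kasan_init(void)
	{
		arch_map_kasan_shadow();	/* hypothetical setup step */
		kasan_enable();			/* enables kasan_flag_enabled */
	}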
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index fe5ce9215821..d12e1a5f5a9a 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -200,7 +200,7 @@ static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
}
bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
- bool still_accessible);
+ bool still_accessible, bool no_quarantine);
/**
* kasan_slab_free - Poison, initialize, and quarantine a slab object.
* @object: Object to be freed.
@@ -226,11 +226,13 @@ bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init,
* @Return true if KASAN took ownership of the object; false otherwise.
*/
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
- void *object, bool init,
- bool still_accessible)
+ void *object, bool init,
+ bool still_accessible,
+ bool no_quarantine)
{
if (kasan_enabled())
- return __kasan_slab_free(s, object, init, still_accessible);
+ return __kasan_slab_free(s, object, init, still_accessible,
+ no_quarantine);
return false;
}
@@ -427,7 +429,8 @@ static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
- bool init, bool still_accessible)
+ bool init, bool still_accessible,
+ bool no_quarantine)
{
return false;
}
@@ -543,6 +546,12 @@ void kasan_report_async(void);
#endif /* CONFIG_KASAN_HW_TAGS */
+#ifdef CONFIG_KASAN_GENERIC
+void __init kasan_init_generic(void);
+#else
+static inline void kasan_init_generic(void) { }
+#endif
+
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 989315dabb86..5b46924fdff5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -164,11 +164,23 @@ extern int root_mountflags;
extern bool early_boot_irqs_disabled;
-/*
- * Values used for system_state. Ordering of the states must not be changed
+/**
+ * enum system_states - Values used for system_state.
+ *
+ * @SYSTEM_BOOTING: %0, no init needed
+ * @SYSTEM_SCHEDULING: system is ready for scheduling; OK to use RCU
+ * @SYSTEM_FREEING_INITMEM: system is freeing all of initmem; almost running
+ * @SYSTEM_RUNNING: system is up and running
+ * @SYSTEM_HALT: system entered clean system halt state
+ * @SYSTEM_POWER_OFF: system entered shutdown/clean power off state
+ * @SYSTEM_RESTART: system entered emergency power off or normal restart
+ * @SYSTEM_SUSPEND: system entered suspend or hibernate state
+ *
+ * Note:
+ * Ordering of the states must not be changed
* as code checks for <, <=, >, >= STATE.
*/
-extern enum system_states {
+enum system_states {
SYSTEM_BOOTING,
SYSTEM_SCHEDULING,
SYSTEM_FREEING_INITMEM,
@@ -177,7 +189,8 @@ extern enum system_states {
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
SYSTEM_SUSPEND,
-} system_state;
+};
+extern enum system_states system_state;
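A short example of the ordered-state idiom the note above refers to (helper name hypothetical):

	/* true during early boot, before init has run */
	if (system_state < SYSTEM_RUNNING)
		setup_early_path();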
/*
* General tracing related utility functions - trace_printk(),
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 39fe3e6cd282..ff7e231b0485 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -395,6 +395,9 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
+
+ /* Force carrying over the DTB from the current boot */
+ bool force_dtb;
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -461,7 +464,7 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec file flags */
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG | \
- KEXEC_FILE_NO_CMA)
+ KEXEC_FILE_NO_CMA | KEXEC_FILE_FORCE_DTB)
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
diff --git a/include/linux/kexec_handover.h b/include/linux/kexec_handover.h
index 348844cffb13..559d13a3bc44 100644
--- a/include/linux/kexec_handover.h
+++ b/include/linux/kexec_handover.h
@@ -40,6 +40,7 @@ struct kho_serialization;
#ifdef CONFIG_KEXEC_HANDOVER
bool kho_is_enabled(void);
+bool is_kho_boot(void);
int kho_preserve_folio(struct folio *folio);
int kho_preserve_phys(phys_addr_t phys, size_t size);
@@ -60,6 +61,11 @@ static inline bool kho_is_enabled(void)
return false;
}
+static inline bool is_kho_boot(void)
+{
+ return false;
+}
+
static inline int kho_preserve_folio(struct folio *folio)
{
return -EOPNOTSUPP;
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index ff6120463745..eb1946a70cff 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H
+#include <linux/mm.h>
+
extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;
@@ -20,13 +22,13 @@ extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+ if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
__khugepaged_enter(mm);
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+ if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
__khugepaged_exit(mm);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
index 2b1432cc16d5..f2fd221107bb 100644
--- a/include/linux/kmsan.h
+++ b/include/linux/kmsan.h
@@ -182,8 +182,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
/**
* kmsan_handle_dma() - Handle a DMA data transfer.
- * @page: first page of the buffer.
- * @offset: offset of the buffer within the first page.
+ * @phys: physical address of the buffer.
* @size: buffer size.
* @dir: one of possible dma_data_direction values.
*
@@ -192,7 +191,7 @@ void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
* * initializes the buffer, if it is copied from device;
* * does both, if this is a DMA_BIDIRECTIONAL transfer.
*/
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
enum dma_data_direction dir);
/**
@@ -372,8 +371,8 @@ static inline void kmsan_iounmap_page_range(unsigned long start,
{
}
-static inline void kmsan_handle_dma(struct page *page, size_t offset,
- size_t size, enum dma_data_direction dir)
+static inline void kmsan_handle_dma(phys_addr_t phys, size_t size,
+ enum dma_data_direction dir)
{
}
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c17b955e7b0b..067538fc4d58 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -56,13 +56,19 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
/* Adding mm to ksm is best effort on fork. */
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ if (mm_flags_test(MMF_VM_MERGEABLE, oldmm)) {
+ long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
+
+ mm->ksm_merging_pages = 0;
+ mm->ksm_rmap_items = 0;
+ atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
__ksm_enter(mm);
+ }
}
static inline int ksm_execve(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ if (mm_flags_test(MMF_VM_MERGE_ANY, mm))
return __ksm_enter(mm);
return 0;
@@ -70,7 +76,7 @@ static inline int ksm_execve(struct mm_struct *mm)
static inline void ksm_exit(struct mm_struct *mm)
{
- if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ if (mm_flags_test(MMF_VM_MERGEABLE, mm))
__ksm_exit(mm);
}
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0620dd67369f..21de0935775d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1203,7 +1203,7 @@ extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
- struct block_device *bdev,
+ struct gendisk *unused,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
extern int ata_scsi_sdev_init(struct scsi_device *sdev);
diff --git a/include/linux/list.h b/include/linux/list.h
index e7e28afd28f8..5bfda2f91fca 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -20,8 +20,16 @@
* using the generic single-entry routines.
*/
+/**
+ * LIST_HEAD_INIT - initialize a &struct list_head's links to point to itself
+ * @name: name of the list_head
+ */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
+/**
+ * LIST_HEAD - definition of a &struct list_head with initialization values
+ * @name: name of the list_head
+ */
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
@@ -637,6 +645,20 @@ static inline void list_splice_tail_init(struct list_head *list,
})
/**
+ * list_last_entry_or_null - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_last_entry_or_null(ptr, type, member) ({ \
+ struct list_head *head__ = (ptr); \
+ struct list_head *pos__ = READ_ONCE(head__->prev); \
+ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
+
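A usage sketch, assuming a hypothetical struct item that embeds a list_head:

	struct item {
		int id;
		struct list_head node;
	};

	static int last_id(struct list_head *head)
	{
		struct item *it = list_last_entry_or_null(head, struct item, node);

		return it ? it->id : -1;	/* list was empty */
	}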
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 2ba846419524..0d91d060e3e9 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -66,6 +66,8 @@
*/
#define local_trylock(lock) __local_trylock(this_cpu_ptr(lock))
+#define local_lock_is_locked(lock) __local_lock_is_locked(lock)
+
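A debug-path sketch (lock name hypothetical, assuming the usual INIT_LOCAL_TRYLOCK() initializer); callers must have preemption or migration disabled:

	static DEFINE_PER_CPU(local_trylock_t, my_lock) = INIT_LOCAL_TRYLOCK(my_lock);

	static void assert_my_lock_held(void)
	{
		WARN_ON_ONCE(!local_lock_is_locked(&my_lock));
	}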
/**
* local_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
* interrupts if acquired
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index d80b5306a2c0..a4dc479157b5 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -17,7 +17,10 @@ typedef struct {
/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
typedef struct {
- local_lock_t llock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+#endif
u8 acquired;
} local_trylock_t;
@@ -31,7 +34,7 @@ typedef struct {
.owner = NULL,
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
- .llock = { LOCAL_LOCK_DEBUG_INIT((lockname).llock) },
+ LOCAL_LOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l)
{
@@ -81,7 +84,7 @@ do { \
local_lock_debug_init(lock); \
} while (0)
-#define __local_trylock_init(lock) __local_lock_init(lock.llock)
+#define __local_trylock_init(lock) __local_lock_init((local_lock_t *)lock)
#define __spinlock_nested_bh_init(lock) \
do { \
@@ -162,6 +165,9 @@ do { \
!!tl; \
})
+/* preemption or migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)
+
#define __local_lock_release(lock) \
do { \
local_trylock_t *tl; \
@@ -282,4 +288,8 @@ do { \
__local_trylock(lock); \
})
+/* migration must be disabled before calling __local_lock_is_locked */
+#define __local_lock_is_locked(__lock) \
+ (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
+
#endif /* CONFIG_PREEMPT_RT */
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index adbe234a6f6c..8c42b4bde09c 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -85,7 +85,7 @@ LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
int mode, const struct qstr *name, const char **xattr_name,
struct lsm_context *cp)
LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
- struct qstr *name, const struct cred *old, struct cred *new)
+ const struct qstr *name, const struct cred *old, struct cred *new)
#ifdef CONFIG_SECURITY_PATH
LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry)
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index bafe143b1f78..66f98a3da8d8 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -57,17 +57,17 @@
* MT_FLAGS_ALLOC_RANGE flag.
*
* Node types:
- * 0x??1 = Root
- * 0x?00 = 16 bit nodes
- * 0x010 = 32 bit nodes
- * 0x110 = 64 bit nodes
+ * 0b??1 = Root
+ * 0b?00 = 16 bit nodes
+ * 0b010 = 32 bit nodes
+ * 0b110 = 64 bit nodes
*
* Slot size and location in the parent pointer:
* type : slot location
- * 0x??1 : Root
- * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
- * 0x010 : 32 bit values, type in 0-2, slot in 3-6
- * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-6
*/
/*
@@ -194,7 +194,6 @@ enum store_type {
#define MAPLE_RESERVED_RANGE 4096
#ifdef CONFIG_LOCKDEP
-typedef struct lockdep_map *lockdep_map_p;
#define mt_lock_is_held(mt) \
(!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
@@ -207,7 +206,6 @@ typedef struct lockdep_map *lockdep_map_p;
#define mt_on_stack(mt) (mt).ma_external_lock = NULL
#else
-typedef struct { /* nothing */ } lockdep_map_p;
#define mt_lock_is_held(mt) 1
#define mt_write_lock_is_held(mt) 1
#define mt_set_external_lock(mt, lock) do { } while (0)
@@ -230,8 +228,10 @@ typedef struct { /* nothing */ } lockdep_map_p;
*/
struct maple_tree {
union {
- spinlock_t ma_lock;
- lockdep_map_p ma_external_lock;
+ spinlock_t ma_lock;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *ma_external_lock;
+#endif
};
unsigned int ma_flags;
void __rcu *ma_root;
@@ -442,7 +442,9 @@ struct ma_state {
struct maple_enode *node; /* The node containing this entry */
unsigned long min; /* The minimum index of this node - implied pivot min */
unsigned long max; /* The maximum index of this node - implied pivot max */
- struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ struct slab_sheaf *sheaf; /* Allocated nodes for this operation */
+ struct maple_node *alloc; /* A single allocated node for fast path writes */
+ unsigned long node_request; /* The number of nodes to allocate for this operation */
enum maple_status status; /* The status of the state (active, start, none, etc) */
unsigned char depth; /* depth of tree descent during write */
unsigned char offset;
@@ -481,6 +483,9 @@ struct ma_wr_state {
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+/*
+ * When changing MA_STATE, remember to also change rust/kernel/maple_tree.rs
+ */
#define MA_STATE(name, mt, first, end) \
struct ma_state name = { \
.tree = mt, \
@@ -490,7 +495,9 @@ struct ma_wr_state {
.status = ma_start, \
.min = 0, \
.max = ULONG_MAX, \
+ .sheaf = NULL, \
.alloc = NULL, \
+ .node_request = 0, \
.mas_flags = 0, \
.store_type = wr_invalid, \
}
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 725fd7727422..a82755e1fc40 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -113,6 +113,7 @@ int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
mei_cldev_cb_t notif_cb);
u8 mei_cldev_ver(const struct mei_cl_device *cldev);
+size_t mei_cldev_mtu(const struct mei_cl_device *cldev);
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev);
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 785173aa0739..16fe0306e50e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -341,17 +341,25 @@ enum page_memcg_data_flags {
__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};
+#define __OBJEXTS_ALLOC_FAIL MEMCG_DATA_OBJEXTS
#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS
#else /* CONFIG_MEMCG */
+#define __OBJEXTS_ALLOC_FAIL (1UL << 0)
#define __FIRST_OBJEXT_FLAG (1UL << 0)
#endif /* CONFIG_MEMCG */
enum objext_flags {
- /* slabobj_ext vector failed to allocate */
- OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
+ /*
+ * Bit 0 with all other bits zero signals that the slabobj_ext
+ * vector failed to allocate. The same bit 0 with valid upper bits
+ * means MEMCG_DATA_OBJEXTS.
+ */
+ OBJEXTS_ALLOC_FAIL = __OBJEXTS_ALLOC_FAIL,
+ /* slabobj_ext vector allocated with kmalloc_nolock() */
+ OBJEXTS_NOSPIN_ALLOC = __FIRST_OBJEXT_FLAG,
/* the next bit after the last actual flag */
__NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1),
};
@@ -900,7 +908,13 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(gfp_t gfp_mask);
+void __mem_cgroup_handle_over_high(gfp_t gfp_mask);
+
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
+{
+ if (unlikely(current->memcg_nr_pages_over_high))
+ __mem_cgroup_handle_over_high(gfp_mask);
+}
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -1053,6 +1067,8 @@ extern int mem_cgroup_init(void);
#define MEM_CGROUP_ID_SHIFT 0
+#define root_mem_cgroup (NULL)
+
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
return NULL;
@@ -1596,14 +1612,16 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
#endif /* CONFIG_CGROUP_WRITEBACK */
struct sock;
-bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
- gfp_t gfp_mask);
-void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
+
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
+void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk);
+bool mem_cgroup_sk_charge(const struct sock *sk, unsigned int nr_pages,
+ gfp_t gfp_mask);
+void mem_cgroup_sk_uncharge(const struct sock *sk, unsigned int nr_pages);
#if BITS_PER_LONG < 64
static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
@@ -1640,32 +1658,37 @@ static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
}
#endif
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
-{
-#ifdef CONFIG_MEMCG_V1
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
- return !!memcg->tcpmem_pressure;
-#endif /* CONFIG_MEMCG_V1 */
- do {
- if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg)))
- return true;
- } while ((memcg = parent_mem_cgroup(memcg)));
- return false;
-}
-
int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
-static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
-static inline void mem_cgroup_sk_free(struct sock *sk) { };
-static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
+
+static inline void mem_cgroup_sk_alloc(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_free(struct sock *sk)
+{
+}
+
+static inline void mem_cgroup_sk_inherit(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline bool mem_cgroup_sk_charge(const struct sock *sk,
+ unsigned int nr_pages,
+ gfp_t gfp_mask)
{
return false;
}
+static inline void mem_cgroup_sk_uncharge(const struct sock *sk,
+ unsigned int nr_pages)
+{
+}
+
static inline void set_shrinker_bit(struct mem_cgroup *memcg,
int nid, int shrinker_id)
{
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 7b151441341b..34941a4b9026 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -15,7 +15,7 @@ struct kmem_cache;
typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
-typedef struct mempool_s {
+typedef struct mempool {
spinlock_t lock;
int min_nr; /* nr of elements at *elements */
int curr_nr; /* Current nr of elements at *elements */
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 4aa151914eab..e5951ba12a28 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -157,45 +157,52 @@ static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
return 1 << pgmap->vmemmap_shift;
}
-static inline bool is_device_private_page(const struct page *page)
+static inline bool folio_is_device_private(const struct folio *folio)
{
return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
- is_zone_device_page(page) &&
- page_pgmap(page)->type == MEMORY_DEVICE_PRIVATE;
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PRIVATE;
}
-static inline bool folio_is_device_private(const struct folio *folio)
+static inline bool is_device_private_page(const struct page *page)
{
- return is_device_private_page(&folio->page);
+ return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
+ folio_is_device_private(page_folio(page));
}
-static inline bool is_pci_p2pdma_page(const struct page *page)
+static inline bool folio_is_pci_p2pdma(const struct folio *folio)
{
return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
- is_zone_device_page(page) &&
- page_pgmap(page)->type == MEMORY_DEVICE_PCI_P2PDMA;
+ folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
-static inline bool is_device_coherent_page(const struct page *page)
+static inline bool is_pci_p2pdma_page(const struct page *page)
{
- return is_zone_device_page(page) &&
- page_pgmap(page)->type == MEMORY_DEVICE_COHERENT;
+ return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
+ folio_is_pci_p2pdma(page_folio(page));
}
static inline bool folio_is_device_coherent(const struct folio *folio)
{
- return is_device_coherent_page(&folio->page);
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_COHERENT;
}
-static inline bool is_fsdax_page(const struct page *page)
+static inline bool is_device_coherent_page(const struct page *page)
{
- return is_zone_device_page(page) &&
- page_pgmap(page)->type == MEMORY_DEVICE_FS_DAX;
+ return folio_is_device_coherent(page_folio(page));
}
static inline bool folio_is_fsdax(const struct folio *folio)
{
- return is_fsdax_page(&folio->page);
+ return folio_is_zone_device(folio) &&
+ folio->pgmap->type == MEMORY_DEVICE_FS_DAX;
+}
+
+static inline bool is_fsdax_page(const struct page *page)
+{
+ return folio_is_fsdax(page_folio(page));
}
#ifdef CONFIG_ZONE_DEVICE
@@ -204,8 +211,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap);
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
@@ -227,8 +233,7 @@ static inline void devm_memunmap_pages(struct device *dev,
{
}
-static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
- struct dev_pagemap *pgmap)
+static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn)
{
return NULL;
}
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 9af01bdd86d2..ca691641788b 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -32,6 +32,7 @@
#define PHY_ID_LAN8814 0x00221660
#define PHY_ID_LAN8804 0x00221670
#define PHY_ID_LAN8841 0x00221650
+#define PHY_ID_LAN8842 0x002216C0
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 9009e27b5f44..1f0ac122c3bf 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,14 +12,6 @@ typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
-/*
- * Return values from addresss_space_operations.migratepage():
- * - negative errno on page migration failure;
- * - zero on page migration success;
- */
-#define MIGRATEPAGE_SUCCESS 0
-#define MIGRATEPAGE_UNMAP 1
-
/**
* struct movable_operations - Driver page migration
* @isolate_page:
@@ -35,8 +27,7 @@ struct migration_target_control;
* @src page. The driver should copy the contents of the
* @src page to the @dst page and set up the fields of @dst page.
* Both pages are locked.
- * If page migration is successful, the driver should
- * return MIGRATEPAGE_SUCCESS.
+ * If page migration is successful, the driver should return 0.
* If the driver cannot migrate the page at the moment, it can return
* -EAGAIN. The VM interprets this as a temporary migration failure and
* will retry it later. Any other error value is a permanent migration
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 991526039ccb..7ef2c7c7d803 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -41,7 +41,6 @@ struct mlx5_core_cq {
int cqe_sz;
__be32 *set_ci_db;
__be32 *arm_db;
- struct mlx5_uars_page *uar;
refcount_t refcount;
struct completion free;
unsigned vector;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 9d2467f982ad..d7f46a8fbfa1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1248,6 +1248,7 @@ enum mlx5_cap_type {
MLX5_CAP_IPSEC,
MLX5_CAP_CRYPTO = 0x1a,
MLX5_CAP_SHAMPO = 0x1d,
+ MLX5_CAP_PSP = 0x1e,
MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
MLX5_CAP_PORT_SELECTION = 0x25,
@@ -1487,6 +1488,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_SHAMPO(mdev, cap) \
MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_SHAMPO]->cur, cap)
+#define MLX5_CAP_PSP(mdev, cap)\
+ MLX5_GET(psp_cap, (mdev)->caps.hca[MLX5_CAP_PSP]->cur, cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
@@ -1521,6 +1525,7 @@ enum {
MLX5_PHYSICAL_LAYER_RECOVERY_GROUP = 0x1a,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21,
+ MLX5_RS_FEC_HISTOGRAM_GROUP = 0x23,
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 10fe492e1fed..5405ca1038f9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -130,12 +130,14 @@ enum {
MLX5_REG_PDDR = 0x5031,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
+ MLX5_REG_PPHCR = 0x503E,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MTCAP = 0x9009,
MLX5_REG_MTMP = 0x900A,
MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MNVDA = 0x9024,
MLX5_REG_MFRL = 0x9028,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MRTC = 0x902d,
@@ -432,7 +434,6 @@ struct mlx5_sq_bfreg {
struct mlx5_uars_page *up;
bool wc;
u32 index;
- unsigned int offset;
};
struct mlx5_core_health {
@@ -611,7 +612,7 @@ struct mlx5_priv {
struct mlx5_ft_pool *ft_pool;
struct mlx5_bfreg_data bfregs;
- struct mlx5_uars_page *uar;
+ struct mlx5_sq_bfreg bfreg;
#ifdef CONFIG_MLX5_SF
struct mlx5_vhca_state_notifier *vhca_state_notifier;
struct mlx5_sf_dev_table *sf_dev_table;
@@ -657,7 +658,8 @@ struct mlx5e_resources {
u32 pdn;
struct mlx5_td td;
u32 mkey;
- struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg *bfregs;
+ unsigned int num_bfregs;
#define MLX5_MAX_NUM_TC 8
u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
bool tisn_valid;
@@ -802,6 +804,8 @@ struct mlx5_db {
int index;
};
+#define MLX5_DEFAULT_NUM_DOORBELLS 8
+
enum {
MLX5_COMP_EQ_SIZE = 1024,
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 8360d9011d4f..07614cd95bed 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -189,6 +189,9 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
+ MLX5_CMD_OPCODE_QUERY_DELEGATED_VHCA = 0x732,
+ MLX5_CMD_OPCODE_CREATE_ESW_VPORT = 0x733,
+ MLX5_CMD_OPCODE_DESTROY_ESW_VPORT = 0x734,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
@@ -311,6 +314,8 @@ enum {
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
+ MLX5_CMD_OP_PSP_GEN_SPI = 0xb10,
+ MLX5_CMD_OP_PSP_ROTATE_KEY = 0xb11,
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
@@ -486,12 +491,14 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 execute_aso[0x1];
u8 reserved_at_47[0x19];
- u8 reserved_at_60[0x2];
+ u8 reformat_l2_to_l3_psp_tunnel[0x1];
+ u8 reformat_l3_psp_tunnel_to_l2[0x1];
u8 reformat_insert[0x1];
u8 reformat_remove[0x1];
u8 macsec_encrypt[0x1];
u8 macsec_decrypt[0x1];
- u8 reserved_at_66[0x2];
+ u8 psp_encrypt[0x1];
+ u8 psp_decrypt[0x1];
u8 reformat_add_macsec[0x1];
u8 reformat_remove_macsec[0x1];
u8 reparse[0x1];
@@ -700,7 +707,7 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 metadata_reg_a[0x20];
- u8 reserved_at_1a0[0x8];
+ u8 psp_syndrome[0x8];
u8 macsec_syndrome[0x8];
u8 ipsec_syndrome[0x8];
u8 ipsec_next_header[0x8];
@@ -1508,6 +1515,21 @@ struct mlx5_ifc_macsec_cap_bits {
u8 reserved_at_40[0x7c0];
};
+struct mlx5_ifc_psp_cap_bits {
+ u8 reserved_at_0[0x1];
+ u8 psp_crypto_offload[0x1];
+ u8 reserved_at_2[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 psp_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_num_of_psp_spi[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x7e0];
+};
+
enum {
MLX5_WQ_TYPE_LINKED_LIST = 0x0,
MLX5_WQ_TYPE_CYCLIC = 0x1,
@@ -1873,7 +1895,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_2a0[0x7];
u8 mkey_pcie_tph[0x1];
- u8 reserved_at_2a8[0x3];
+ u8 reserved_at_2a8[0x1];
+ u8 tis_tir_td_order[0x1];
+
+ u8 psp[0x1];
u8 shampo[0x1];
u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
@@ -1934,7 +1959,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_rqt[0x5];
u8 reserved_at_390[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_at_398[0x3];
+ u8 reserved_at_398[0x1];
+ u8 vnic_env_cnt_bar_uar_access[0x1];
+ u8 vnic_env_cnt_odp_page_fault[0x1];
u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1];
@@ -2207,7 +2234,23 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_440[0x8];
u8 max_num_eqs_24b[0x18];
- u8 reserved_at_460[0x3a0];
+
+ u8 reserved_at_460[0x144];
+ u8 load_balance_id[0x4];
+ u8 reserved_at_5a8[0x18];
+
+ u8 query_adjacent_functions_id[0x1];
+ u8 ingress_egress_esw_vport_connect[0x1];
+ u8 function_id_type_vhca_id[0x1];
+ u8 reserved_at_5c3[0x1];
+ u8 lag_per_mp_group[0x1];
+ u8 reserved_at_5c5[0xb];
+ u8 delegate_vhca_management_profiles[0x10];
+
+ u8 delegated_vhca_max[0x10];
+ u8 delegate_vhca_max[0x10];
+
+ u8 reserved_at_600[0x200];
};
enum mlx5_ifc_flow_destination_type {
@@ -3788,6 +3831,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_macsec_cap_bits macsec_cap;
struct mlx5_ifc_crypto_cap_bits crypto_cap;
struct mlx5_ifc_ipsec_cap_bits ipsec_cap;
+ struct mlx5_ifc_psp_cap_bits psp_cap;
u8 reserved_at_0[0x8000];
};
@@ -3817,6 +3861,7 @@ enum {
enum {
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_MACSEC = 0x1,
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_PSP = 0x2,
};
struct mlx5_ifc_vlan_bits {
@@ -3981,7 +4026,13 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits {
u8 handled_pkt_steering_fail[0x40];
- u8 reserved_at_360[0xc80];
+ u8 bar_uar_access[0x20];
+
+ u8 odp_local_triggered_page_fault[0x20];
+
+ u8 odp_remote_triggered_page_fault[0x20];
+
+ u8 reserved_at_3c0[0xc20];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -4863,6 +4914,11 @@ union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
u8 reserved_at_0[0x20];
};
+struct mlx5_ifc_rs_histogram_cntrs_bits {
+ u8 hist[16][0x40];
+ u8 reserved_at_400[0x2c0];
+};
+
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
@@ -4877,6 +4933,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
struct mlx5_ifc_phys_layer_recovery_cntrs_bits phys_layer_recovery_cntrs;
+ struct mlx5_ifc_rs_histogram_cntrs_bits rs_histogram_cntrs;
u8 reserved_at_0[0x7c0];
};
@@ -5159,7 +5216,9 @@ struct mlx5_ifc_set_hca_cap_in_bits {
u8 other_function[0x1];
u8 ec_vf_function[0x1];
- u8 reserved_at_42[0xe];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -6357,7 +6416,9 @@ struct mlx5_ifc_query_hca_cap_in_bits {
u8 other_function[0x1];
u8 ec_vf_function[0x1];
- u8 reserved_at_42[0xe];
+ u8 reserved_at_42[0x1];
+ u8 function_id_type[0x1];
+ u8 reserved_at_44[0xc];
u8 function_id[0x10];
u8 reserved_at_60[0x20];
@@ -6983,6 +7044,28 @@ struct mlx5_ifc_query_esw_vport_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_destroy_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_destroy_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 vport_num[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7118,6 +7201,8 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
+ MLX5_REFORMAT_TYPE_ADD_PSP_TUNNEL = 0xd,
+ MLX5_REFORMAT_TYPE_DEL_PSP_TUNNEL = 0xe,
MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -7244,6 +7329,7 @@ enum {
MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D,
MLX5_ACTION_IN_FIELD_OUT_EMD_47_32 = 0x6F,
MLX5_ACTION_IN_FIELD_OUT_EMD_31_0 = 0x70,
+ MLX5_ACTION_IN_FIELD_PSP_SYNDROME = 0x71,
};
struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -7484,6 +7570,85 @@ struct mlx5_ifc_query_adapter_in_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_function_vhca_rid_info_reg_bits {
+ u8 host_number[0x8];
+ u8 host_pci_device_function[0x8];
+ u8 host_pci_bus[0x8];
+ u8 reserved_at_18[0x3];
+ u8 pci_bus_assigned[0x1];
+ u8 function_type[0x4];
+
+ u8 parent_pci_device_function[0x8];
+ u8 parent_pci_bus[0x8];
+ u8 vhca_id[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_delegated_function_vhca_rid_info_bits {
+ struct mlx5_ifc_function_vhca_rid_info_reg_bits function_vhca_rid_info;
+
+ u8 reserved_at_80[0x18];
+ u8 manage_profile[0x8];
+
+ u8 reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_query_delegated_vhca_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 functions_count[0x10];
+
+ u8 reserved_at_80[0x80];
+
+ struct mlx5_ifc_delegated_function_vhca_rid_info_bits
+ delegated_function_vhca_rid_info[];
+};
+
+struct mlx5_ifc_query_delegated_vhca_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_esw_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 vport_num[0x10];
+};
+
+struct mlx5_ifc_create_esw_vport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 managed_vhca_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_qp_2rst_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7611,7 +7776,12 @@ struct mlx5_ifc_modify_vport_state_in_bits {
u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_at_60[0x18];
+ u8 reserved_at_60[0x10];
+ u8 ingress_connect[0x1];
+ u8 egress_connect[0x1];
+ u8 ingress_connect_valid[0x1];
+ u8 egress_connect_valid[0x1];
+ u8 reserved_at_74[0x4];
u8 admin_state[0x4];
u8 reserved_at_7c[0x4];
};
@@ -11587,6 +11757,28 @@ struct mlx5_ifc_mtctr_reg_bits {
u8 second_clock_timestamp[0x40];
};
+struct mlx5_ifc_bin_range_layout_bits {
+ u8 reserved_at_0[0xa];
+ u8 high_val[0x6];
+ u8 reserved_at_10[0xa];
+ u8 low_val[0x6];
+};
+
+struct mlx5_ifc_pphcr_reg_bits {
+ u8 active_hist_type[0x4];
+ u8 reserved_at_4[0x4];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x8];
+ u8 num_of_bins[0x8];
+ u8 reserved_at_30[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_bin_range_layout_bits bin_range[16];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -11653,6 +11845,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mtmp_reg_bits mtmp_reg;
struct mlx5_ifc_mtptm_reg_bits mtptm_reg;
struct mlx5_ifc_mtctr_reg_bits mtctr_reg;
+ struct mlx5_ifc_pphcr_reg_bits pphcr_reg;
u8 reserved_at_0[0x60e0];
};
@@ -12954,6 +13147,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_PSP = 0x6,
};
struct mlx5_ifc_tls_static_params_bits {
@@ -13371,4 +13565,64 @@ enum mlx5e_pcie_cong_event_mod_field {
MLX5_PCIE_CONG_EVENT_MOD_THRESH = BIT(2),
};
+struct mlx5_ifc_psp_rotate_key_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_psp_rotate_key_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum mlx5_psp_gen_spi_in_key_size {
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_128 = 0x0,
+ MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256 = 0x1,
+};
+
+struct mlx5_ifc_key_spi_bits {
+ u8 spi[0x20];
+
+ u8 reserved_at_20[0x60];
+
+ u8 key[8][0x20];
+};
+
+struct mlx5_ifc_psp_gen_spi_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 key_size[0x2];
+ u8 reserved_at_62[0xe];
+ u8 num_of_spi[0x10];
+};
+
+struct mlx5_ifc_psp_gen_spi_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_spi[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_key_spi_bits key_spi[];
+};
+
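/*
 * A hedged sketch of driving the PSP_GEN_SPI layouts above through the
 * existing command interface; the sizing and key-count choices here are
 * illustrative, not taken from this patch:
 *
 *	u32 in[MLX5_ST_SZ_DW(psp_gen_spi_in)] = {};
 *	int out_sz = MLX5_ST_SZ_BYTES(psp_gen_spi_out) +
 *		     num_spis * MLX5_ST_SZ_BYTES(key_spi);
 *	void *out = kvzalloc(out_sz, GFP_KERNEL);
 *
 *	MLX5_SET(psp_gen_spi_in, in, opcode, MLX5_CMD_OP_PSP_GEN_SPI);
 *	MLX5_SET(psp_gen_spi_in, in, key_size,
 *		 MLX5_PSP_GEN_SPI_IN_KEY_SIZE_256);
 *	MLX5_SET(psp_gen_spi_in, in, num_of_spi, num_spis);
 *	if (out)
 *		err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
 */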
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index fc7eeff99a8a..d67aedc6ea68 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -237,13 +237,11 @@ enum {
};
enum {
- MLX5_ETH_WQE_SVLAN = 1 << 0,
MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26,
MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27,
MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26,
MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28,
MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30,
- MLX5_ETH_WQE_INSERT_VLAN = 1 << 15,
};
enum {
@@ -253,9 +251,15 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+/* Metadata bits 0-7 are used by timestamping */
+/* Base shift for metadata bits used by IPsec and MACsec */
+#define MLX5_ETH_WQE_FT_META_SHIFT 8
+
enum {
- MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
- MLX5_ETH_WQE_FT_META_MACSEC = BIT(1),
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC = BIT(1) << MLX5_ETH_WQE_FT_META_SHIFT,
+ MLX5_ETH_WQE_FT_META_MACSEC_FS_ID_MASK =
+ GENMASK(5, 2) << MLX5_ETH_WQE_FT_META_SHIFT,
};
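/*
 * The shift above moves the flow-table metadata flags out of the low byte,
 * which is reserved for timestamping: MLX5_ETH_WQE_FT_META_IPSEC is now
 * BIT(0) << 8 == 0x100 instead of 0x1. A hedged illustration of how a send
 * path tags a WQE (mirroring the existing IPsec usage pattern):
 *
 *	eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
 */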
struct mlx5_wqe_eth_seg {
@@ -275,10 +279,6 @@ struct mlx5_wqe_eth_seg {
DECLARE_FLEX_ARRAY(u8, data);
};
} inline_hdr;
- struct {
- __be16 type;
- __be16 vlan_tci;
- } insert;
__be32 trailer;
};
};
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index c36cc6d82926..c87b9507cfa1 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -135,4 +135,6 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod);
+int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);
+
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1ae97a0b8ec7..06978b4dbeb8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -34,6 +34,8 @@
#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/rcuwait.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
struct mempolicy;
struct anon_vma;
@@ -69,6 +71,15 @@ static inline void totalram_pages_add(long count)
extern void * high_memory;
+/*
+ * Convert between pages and MB.
+ * 20 is the shift for 1MB (2^20 bytes == 1MB).
+ * PAGE_SHIFT is the shift for the page size (e.g., 12 for 4KB pages).
+ * So shifting by (20 - PAGE_SHIFT) converts between pages and MB.
+ */
+#define PAGES_TO_MB(pages) ((pages) >> (20 - PAGE_SHIFT))
+#define MB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+
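/*
 * Worked example (assuming 4KB pages, i.e. PAGE_SHIFT == 12): the shift is
 * 20 - 12 == 8, so PAGES_TO_MB(512) == 512 >> 8 == 2 and
 * MB_TO_PAGES(2) == 2 << 8 == 512.
 */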
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
@@ -198,11 +209,13 @@ extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
-#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio))
+bool page_range_contiguous(const struct page *page, unsigned long nr_pages);
#else
-#define nth_page(page,n) ((page) + (n))
-#define folio_page_idx(folio, p) ((p) - &(folio)->page)
+static inline bool page_range_contiguous(const struct page *page,
+ unsigned long nr_pages)
+{
+ return true;
+}
#endif
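/*
 * A hedged usage sketch: with the nth_page()/folio_page_idx() macros removed
 * above, a caller that needs plain pointer arithmetic across a page range
 * can first assert contiguity:
 *
 *	if (page_range_contiguous(page, nr_pages))
 *		last = page + nr_pages - 1;	(illustrative only)
 */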
/* to align the pointer to the (next) page boundary */
@@ -214,6 +227,20 @@ extern unsigned long sysctl_admin_reserve_kbytes;
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
+/**
+ * folio_page_idx - Return the number of a page in a folio.
+ * @folio: The folio.
+ * @page: The folio page.
+ *
+ * This function expects that the page is actually part of the folio.
+ * The returned number is relative to the start of the folio.
+ */
+static inline unsigned long folio_page_idx(const struct folio *folio,
+ const struct page *page)
+{
+ return page - &folio->page;
+}
+
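/*
 * Example (assuming a large folio of at least three pages, so that the
 * page genuinely belongs to the folio):
 *
 *	struct page *page = folio_page(folio, 2);
 *	VM_WARN_ON(folio_page_idx(folio, page) != 2);
 */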
static inline struct folio *lru_to_folio(struct list_head *head)
{
return list_entry((head)->prev, struct folio, lru);
@@ -648,13 +675,21 @@ struct vm_operations_struct {
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr, pgoff_t *ilx);
#endif
+#ifdef CONFIG_FIND_NORMAL_PAGE
/*
- * Called by vm_normal_page() for special PTEs to find the
- * page for @addr. This is useful if the default behavior
- * (using pte_page()) would not find the correct page.
+ * Called by vm_normal_page() for special PTEs in @vma at @addr. This
+ * allows for returning a "normal" page from vm_normal_page() even
+ * though the PTE indicates that the "struct page" either does not exist
+ * or should not be touched: "special".
+ *
+ * Do not add new users: this really only works when a "normal" page
+ * was mapped, but then the PTE got changed to something weird (+
+ * marked special) that would not make pte_pfn() identify the originally
+ * inserted page.
*/
- struct page *(*find_special_page)(struct vm_area_struct *vma,
- unsigned long addr);
+ struct page *(*find_normal_page)(struct vm_area_struct *vma,
+ unsigned long addr);
+#endif /* CONFIG_FIND_NORMAL_PAGE */
};
#ifdef CONFIG_NUMA_BALANCING
@@ -684,7 +719,7 @@ static inline void release_fault_lock(struct vm_fault *vmf)
mmap_read_unlock(vmf->vma->vm_mm);
}
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
{
if (vmf->flags & FAULT_FLAG_VMA_LOCK)
vma_assert_locked(vmf->vma);
@@ -697,12 +732,42 @@ static inline void release_fault_lock(struct vm_fault *vmf)
mmap_read_unlock(vmf->vma->vm_mm);
}
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
{
mmap_assert_locked(vmf->vma->vm_mm);
}
#endif /* CONFIG_PER_VMA_LOCK */
+static inline bool mm_flags_test(int flag, const struct mm_struct *mm)
+{
+ return test_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline bool mm_flags_test_and_set(int flag, struct mm_struct *mm)
+{
+ return test_and_set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline bool mm_flags_test_and_clear(int flag, struct mm_struct *mm)
+{
+ return test_and_clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline void mm_flags_set(int flag, struct mm_struct *mm)
+{
+ set_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline void mm_flags_clear(int flag, struct mm_struct *mm)
+{
+ clear_bit(flag, ACCESS_PRIVATE(&mm->flags, __mm_flags));
+}
+
+static inline void mm_flags_clear_all(struct mm_struct *mm)
+{
+ bitmap_zero(ACCESS_PRIVATE(&mm->flags, __mm_flags), NUM_MM_FLAG_BITS);
+}
+
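/*
 * These helpers replace direct bitops on mm->flags now that the flags live
 * in an opaque bitmap; the conversion pattern, as applied elsewhere in this
 * series, is:
 *
 *	if (test_bit(MMF_HAS_PINNED, &mm->flags))	(old)
 *	if (mm_flags_test(MMF_HAS_PINNED, mm))		(new)
 */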
extern const struct vm_operations_struct vma_dummy_vm_ops;
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
@@ -810,7 +875,7 @@ static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
vma->vm_end >= vma->vm_mm->start_stack;
}
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -824,7 +889,7 @@ static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
{
if (!current->mm)
return true;
@@ -835,7 +900,7 @@ static inline bool vma_is_foreign(struct vm_area_struct *vma)
return false;
}
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
{
return vma->vm_flags & VM_ACCESS_FLAGS;
}
@@ -846,7 +911,7 @@ static inline bool is_shared_maywrite(vm_flags_t vm_flags)
(VM_SHARED | VM_MAYWRITE);
}
-static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
{
return is_shared_maywrite(vma->vm_flags);
}
@@ -930,14 +995,14 @@ static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
* The vma_is_shmem is not inline because it is used only by slow
* paths in userfault.
*/
-bool vma_is_shmem(struct vm_area_struct *vma);
-bool vma_is_anon_shmem(struct vm_area_struct *vma);
+bool vma_is_shmem(const struct vm_area_struct *vma);
+bool vma_is_anon_shmem(const struct vm_area_struct *vma);
#else
-static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
-static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_shmem(const struct vm_area_struct *vma) { return false; }
+static inline bool vma_is_anon_shmem(const struct vm_area_struct *vma) { return false; }
#endif
-int vma_is_stack_for_current(struct vm_area_struct *vma);
+int vma_is_stack_for_current(const struct vm_area_struct *vma);
/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
@@ -953,12 +1018,12 @@ static inline unsigned int folio_large_order(const struct folio *folio)
}
#ifdef NR_PAGES_IN_LARGE_FOLIO
-static inline long folio_large_nr_pages(const struct folio *folio)
+static inline unsigned long folio_large_nr_pages(const struct folio *folio)
{
return folio->_nr_pages;
}
#else
-static inline long folio_large_nr_pages(const struct folio *folio)
+static inline unsigned long folio_large_nr_pages(const struct folio *folio)
{
return 1L << folio_large_order(folio);
}
@@ -971,11 +1036,11 @@ static inline long folio_large_nr_pages(const struct folio *folio)
* set before the order is initialised, or this may be a tail page.
* See compaction.c for some good examples.
*/
-static inline unsigned int compound_order(struct page *page)
+static inline unsigned int compound_order(const struct page *page)
{
- struct folio *folio = (struct folio *)page;
+ const struct folio *folio = (struct folio *)page;
- if (!test_bit(PG_head, &folio->flags))
+ if (!test_bit(PG_head, &folio->flags.f))
return 0;
return folio_large_order(folio);
}
@@ -1191,7 +1256,7 @@ int folio_mc_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
/* Returns the number of bytes in this potentially compound page. */
-static inline unsigned long page_size(struct page *page)
+static inline unsigned long page_size(const struct page *page)
{
return PAGE_SIZE << compound_order(page);
}
@@ -1505,21 +1570,26 @@ static inline bool is_nommu_shared_mapping(vm_flags_t flags)
*/
static inline int page_zone_id(struct page *page)
{
- return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+ return (page->flags.f >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
#ifdef NODE_NOT_IN_PAGE_FLAGS
-int page_to_nid(const struct page *page);
+int memdesc_nid(memdesc_flags_t mdf);
#else
-static inline int page_to_nid(const struct page *page)
+static inline int memdesc_nid(memdesc_flags_t mdf)
{
- return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
+ return (mdf.f >> NODES_PGSHIFT) & NODES_MASK;
}
#endif
+static inline int page_to_nid(const struct page *page)
+{
+ return memdesc_nid(PF_POISONED_CHECK(page)->flags);
+}
+
static inline int folio_nid(const struct folio *folio)
{
- return page_to_nid(&folio->page);
+ return memdesc_nid(folio->flags);
}
#ifdef CONFIG_NUMA_BALANCING
@@ -1588,14 +1658,14 @@ static inline void page_cpupid_reset_last(struct page *page)
#else
static inline int folio_last_cpupid(struct folio *folio)
{
- return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags.f >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
- page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
+ page->flags.f |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
@@ -1691,7 +1761,7 @@ static inline u8 page_kasan_tag(const struct page *page)
u8 tag = KASAN_TAG_KERNEL;
if (kasan_enabled()) {
- tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag = (page->flags.f >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
tag ^= 0xff;
}
@@ -1706,12 +1776,12 @@ static inline void page_kasan_tag_set(struct page *page, u8 tag)
return;
tag ^= 0xff;
- old_flags = READ_ONCE(page->flags);
+ old_flags = READ_ONCE(page->flags.f);
do {
flags = old_flags;
flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
- } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
@@ -1742,26 +1812,26 @@ static inline pg_data_t *page_pgdat(const struct page *page)
return NODE_DATA(page_to_nid(page));
}
-static inline struct zone *folio_zone(const struct folio *folio)
+static inline pg_data_t *folio_pgdat(const struct folio *folio)
{
- return page_zone(&folio->page);
+ return NODE_DATA(folio_nid(folio));
}
-static inline pg_data_t *folio_pgdat(const struct folio *folio)
+static inline struct zone *folio_zone(const struct folio *folio)
{
- return page_pgdat(&folio->page);
+ return &folio_pgdat(folio)->node_zones[folio_zonenum(folio)];
}
#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
- page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
- page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+ page->flags.f &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags.f |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}
-static inline unsigned long page_to_section(const struct page *page)
+static inline unsigned long memdesc_section(memdesc_flags_t mdf)
{
- return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+ return (mdf.f >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif
@@ -1785,7 +1855,7 @@ static inline struct folio *pfn_folio(unsigned long pfn)
}
#ifdef CONFIG_MMU
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
{
return pfn_pte(page_to_pfn(page), pgprot);
}
@@ -1800,7 +1870,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
*
* Return: A page table entry suitable for mapping this folio.
*/
-static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
+static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
{
return pfn_pte(folio_pfn(folio), pgprot);
}
@@ -1816,7 +1886,7 @@ static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
*
* Return: A page table entry suitable for mapping this folio.
*/
-static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
+static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
{
return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
}
@@ -1832,7 +1902,7 @@ static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
*
* Return: A page table entry suitable for mapping this folio.
*/
-static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot)
+static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
{
return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
}
@@ -1900,7 +1970,7 @@ static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
{
VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
- if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
+ if (!mm_flags_test(MMF_HAS_PINNED, vma->vm_mm))
return false;
return folio_maybe_dma_pinned(folio);
@@ -1966,14 +2036,14 @@ static inline bool folio_is_longterm_pinnable(struct folio *folio)
static inline void set_page_zone(struct page *page, enum zone_type zone)
{
- page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
- page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+ page->flags.f &= ~(ZONES_MASK << ZONES_PGSHIFT);
+ page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
static inline void set_page_node(struct page *page, unsigned long node)
{
- page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
- page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+ page->flags.f &= ~(NODES_MASK << NODES_PGSHIFT);
+ page->flags.f |= (node & NODES_MASK) << NODES_PGSHIFT;
}
static inline void set_page_links(struct page *page, enum zone_type zone,
@@ -1992,30 +2062,46 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*
* Return: A positive power of two.
*/
-static inline long folio_nr_pages(const struct folio *folio)
+static inline unsigned long folio_nr_pages(const struct folio *folio)
{
if (!folio_test_large(folio))
return 1;
return folio_large_nr_pages(folio);
}
-/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
-#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
-#define MAX_FOLIO_NR_PAGES (1UL << PUD_ORDER)
+#if !defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE)
+/*
+ * We don't expect any folios that exceed buddy sizes (and consequently
+ * memory sections).
+ */
+#define MAX_FOLIO_ORDER MAX_PAGE_ORDER
+#elif defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+/*
+ * Only pages within a single memory section are guaranteed to be
+ * contiguous. By limiting folios to a single memory section, all folio
+ * pages are guaranteed to be contiguous.
+ */
+#define MAX_FOLIO_ORDER PFN_SECTION_SHIFT
#else
-#define MAX_FOLIO_NR_PAGES MAX_ORDER_NR_PAGES
+/*
+ * There is no real limit on the folio size. We limit them to the maximum we
+ * currently expect (e.g., hugetlb, dax).
+ */
+#define MAX_FOLIO_ORDER PUD_ORDER
#endif
+#define MAX_FOLIO_NR_PAGES (1UL << MAX_FOLIO_ORDER)
+
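/*
 * For illustration: with 4KB pages and the default MAX_PAGE_ORDER of 10,
 * the first branch gives MAX_FOLIO_NR_PAGES == 1024 (a 4MB folio); on
 * x86-64 with CONFIG_ARCH_HAS_GIGANTIC_PAGE and vmemmap, PUD_ORDER == 18,
 * i.e. 262144 pages (a 1GB folio).
 */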
/*
* compound_nr() returns the number of pages in this potentially compound
* page. compound_nr() can be called on a tail page, and is defined to
* return 1 in that case.
*/
-static inline long compound_nr(struct page *page)
+static inline unsigned long compound_nr(const struct page *page)
{
- struct folio *folio = (struct folio *)page;
+ const struct folio *folio = (struct folio *)page;
- if (!test_bit(PG_head, &folio->flags))
+ if (!test_bit(PG_head, &folio->flags.f))
return 1;
return folio_large_nr_pages(folio);
}
@@ -2351,6 +2437,8 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud);
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
@@ -2529,7 +2617,7 @@ void folio_add_pin(struct folio *folio);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
- struct task_struct *task, bool bypass_rlim);
+ const struct task_struct *task, bool bypass_rlim);
struct kvec;
struct page *get_dump_page(unsigned long addr, int *locked);
@@ -2654,7 +2742,7 @@ static inline void update_hiwater_rss(struct mm_struct *mm)
unsigned long _rss = get_mm_rss(mm);
if (data_race(mm->hiwater_rss) < _rss)
- (mm)->hiwater_rss = _rss;
+ data_race(mm->hiwater_rss = _rss);
}
static inline void update_hiwater_vm(struct mm_struct *mm)
@@ -2846,16 +2934,22 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU */
+enum pt_flags {
+ PT_reserved = PG_reserved,
+ /* High bits are used for zone/node/section */
+};
+
static inline struct ptdesc *virt_to_ptdesc(const void *x)
{
return page_ptdesc(virt_to_page(x));
}
-static inline void *ptdesc_to_virt(const struct ptdesc *pt)
-{
- return page_to_virt(ptdesc_page(pt));
-}
-
+/**
+ * ptdesc_address - Virtual address of page table.
+ * @pt: Page table descriptor.
+ *
+ * Return: The first byte of the page table described by @pt.
+ */
static inline void *ptdesc_address(const struct ptdesc *pt)
{
return folio_address(ptdesc_folio(pt));
@@ -2863,7 +2957,7 @@ static inline void *ptdesc_address(const struct ptdesc *pt)
static inline bool pagetable_is_reserved(struct ptdesc *pt)
{
- return folio_test_reserved(ptdesc_folio(pt));
+ return test_bit(PT_reserved, &pt->pt_flags.f);
}
/**
@@ -2973,21 +3067,26 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
+static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc)
+{
+ return compound_nr(ptdesc_page(ptdesc));
+}
+
static inline void __pagetable_ctor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
+ pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
- __folio_set_pgtable(folio);
- lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ __SetPageTable(ptdesc_page(ptdesc));
+ mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc));
}
static inline void pagetable_dtor(struct ptdesc *ptdesc)
{
- struct folio *folio = ptdesc_folio(ptdesc);
+ pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
ptlock_free(ptdesc);
- __folio_clear_pgtable(folio);
- lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+ __ClearPageTable(ptdesc_page(ptdesc));
+ mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc));
}
static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
@@ -3292,7 +3391,7 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
/* mmap.c */
-extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern int __vm_enough_memory(const struct mm_struct *mm, long pages, int cap_sys_admin);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void exit_mmap(struct mm_struct *);
bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -3432,7 +3531,7 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
-static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_GROWSDOWN)
return stack_guard_gap;
@@ -3444,7 +3543,7 @@ static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
return 0;
}
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
{
unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
@@ -3455,7 +3554,7 @@ static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
return vm_start;
}
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
{
unsigned long vm_end = vma->vm_end;
@@ -3467,7 +3566,7 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
return vm_end;
}
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
{
return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
@@ -3484,7 +3583,7 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
return (vma && vma->vm_start <= start && end <= vma->vm_end);
@@ -3600,7 +3699,7 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* Indicates whether GUP can follow a PROT_NONE mapped page, or whether
* a (NUMA hinting) fault is required.
*/
-static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
unsigned int flags)
{
/*
@@ -3730,7 +3829,7 @@ static inline bool debug_guardpage_enabled(void)
return static_branch_unlikely(&_debug_guardpage_enabled);
}
-static inline bool page_is_guard(struct page *page)
+static inline bool page_is_guard(const struct page *page)
{
if (!debug_guardpage_enabled())
return false;
@@ -3761,7 +3860,7 @@ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
unsigned int order) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -3784,7 +3883,7 @@ static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
}
#endif /* __HAVE_ARCH_GATE_AREA */
-extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
+bool process_shares_mm(const struct task_struct *p, const struct mm_struct *mm);
void drop_slab(void);
@@ -3843,7 +3942,7 @@ void vmemmap_free(unsigned long start, unsigned long end,
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
/* number of pfns from base where pfn_to_page() is valid */
if (altmap)
@@ -3857,7 +3956,7 @@ static inline void vmem_altmap_free(struct vmem_altmap *altmap,
altmap->alloc -= nr_pfns;
}
#else
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
{
return 0;
}
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 89b518ff097e..d6c1011b38f2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -25,7 +25,7 @@
* 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
* ram or swap backed folio.
*/
-static inline int folio_is_file_lru(struct folio *folio)
+static inline int folio_is_file_lru(const struct folio *folio)
{
return !folio_test_swapbacked(folio);
}
@@ -84,7 +84,7 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
* Return: The LRU list a folio should be on, as an index
* into the array of LRU lists.
*/
-static __always_inline enum lru_list folio_lru_list(struct folio *folio)
+static __always_inline enum lru_list folio_lru_list(const struct folio *folio)
{
enum lru_list lru;
@@ -141,9 +141,9 @@ static inline int lru_tier_from_refs(int refs, bool workingset)
return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs);
}
-static inline int folio_lru_refs(struct folio *folio)
+static inline int folio_lru_refs(const struct folio *folio)
{
- unsigned long flags = READ_ONCE(folio->flags);
+ unsigned long flags = READ_ONCE(folio->flags.f);
if (!(flags & BIT(PG_referenced)))
return 0;
@@ -154,14 +154,14 @@ static inline int folio_lru_refs(struct folio *folio)
return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1;
}
-static inline int folio_lru_gen(struct folio *folio)
+static inline int folio_lru_gen(const struct folio *folio)
{
- unsigned long flags = READ_ONCE(folio->flags);
+ unsigned long flags = READ_ONCE(folio->flags.f);
return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}
-static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+static inline bool lru_gen_is_active(const struct lruvec *lruvec, int gen)
{
unsigned long max_seq = lruvec->lrugen.max_seq;
@@ -217,12 +217,13 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli
VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}
-static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio,
+static inline unsigned long lru_gen_folio_seq(const struct lruvec *lruvec,
+ const struct folio *folio,
bool reclaiming)
{
int gen;
int type = folio_is_file_lru(folio);
- struct lru_gen_folio *lrugen = &lruvec->lrugen;
+ const struct lru_gen_folio *lrugen = &lruvec->lrugen;
/*
* +-----------------------------------+-----------------------------------+
@@ -268,7 +269,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio,
gen = lru_gen_from_seq(seq);
flags = (gen + 1UL) << LRU_GEN_PGOFF;
/* see the comment on MIN_NR_GENS about PG_active */
- set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+ set_mask_bits(&folio->flags.f, LRU_GEN_MASK | BIT(PG_active), flags);
lru_gen_update_size(lruvec, folio, -1, gen);
/* for folio_rotate_reclaimable() */
@@ -293,7 +294,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
/* for folio_migrate_flags() */
flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
- flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ flags = set_mask_bits(&folio->flags.f, LRU_GEN_MASK, flags);
gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
lru_gen_update_size(lruvec, folio, gen, -1);
@@ -302,11 +303,11 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
- unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;
+ unsigned long refs = READ_ONCE(old->flags.f) & LRU_REFS_MASK;
- set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
+ set_mask_bits(&new->flags.f, LRU_REFS_MASK, refs);
}
#else /* !CONFIG_LRU_GEN */
@@ -330,7 +331,7 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
-static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+static inline void folio_migrate_refs(struct folio *new, const struct folio *old)
{
}
@@ -508,7 +509,7 @@ static inline void dec_tlb_flush_pending(struct mm_struct *mm)
atomic_dec(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+static inline bool mm_tlb_flush_pending(const struct mm_struct *mm)
{
/*
* Must be called after having acquired the PTL; orders against that
@@ -521,7 +522,7 @@ static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending);
}
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+static inline bool mm_tlb_flush_nested(const struct mm_struct *mm)
{
/*
* Similar to mm_tlb_flush_pending(), we must have acquired the PTL
@@ -605,7 +606,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
return false;
}
-static inline bool vma_has_recency(struct vm_area_struct *vma)
+static inline bool vma_has_recency(const struct vm_area_struct *vma)
{
if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
return false;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f625c35128b..90e5790c318f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -20,6 +20,7 @@
#include <linux/seqlock.h>
#include <linux/percpu_counter.h>
#include <linux/types.h>
+#include <linux/bitmap.h>
#include <asm/mmu.h>
@@ -33,6 +34,10 @@ struct address_space;
struct futex_private_hash;
struct mem_cgroup;
+typedef struct {
+ unsigned long f;
+} memdesc_flags_t;
+
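/*
 * The struct wrapper means flag words can no longer be passed to bitops
 * directly; accesses go through the .f member instead, as the conversions
 * later in this diff show:
 *
 *	test_bit(PG_head, &folio->flags)	becomes
 *	test_bit(PG_head, &folio->flags.f)
 */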
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
@@ -71,7 +76,7 @@ struct mem_cgroup;
#endif
struct page {
- unsigned long flags; /* Atomic flags, some possibly
+ memdesc_flags_t flags; /* Atomic flags, some possibly
* updated asynchronously */
/*
* Five words (20/40 bytes) are available in this union.
@@ -89,21 +94,10 @@ struct page {
union {
struct list_head lru;
- /* Or, for the Unevictable "LRU list" slot */
- struct {
- /* Always even, to negate PageTail */
- void *__filler;
- /* Count page's or folio's mlocks */
- unsigned int mlock_count;
- };
-
/* Or, free page */
struct list_head buddy_list;
struct list_head pcp_list;
- struct {
- struct llist_node pcp_llist;
- unsigned int order;
- };
+ struct llist_node pcp_llist;
};
struct address_space *mapping;
union {
@@ -114,7 +108,8 @@ struct page {
* @private: Mapping-private opaque data.
* Usually used for buffer_heads if PagePrivate.
* Used for swp_entry_t if swapcache flag set.
- * Indicates order in the buddy system if PageBuddy.
+ * Indicates order in the buddy system if PageBuddy
+ * or on pcp_llist.
*/
unsigned long private;
};
@@ -382,11 +377,13 @@ struct folio {
union {
struct {
/* public: */
- unsigned long flags;
+ memdesc_flags_t flags;
union {
struct list_head lru;
/* private: avoid cluttering the output */
+ /* For the Unevictable "LRU list" slot */
struct {
+ /* Avoid compound_head */
void *__filler;
/* public: */
unsigned int mlock_count;
@@ -525,7 +522,7 @@ FOLIO_MATCH(compound_head, _head_3);
/**
* struct ptdesc - Memory descriptor for page tables.
- * @__page_flags: Same as page flags. Powerpc only.
+ * @pt_flags: enum pt_flags plus zone/node/section.
* @pt_rcu_head: For freeing page table pages.
* @pt_list: List of used page tables. Used for s390 gmap shadow pages
* (which are not linked into the user page tables) and x86
@@ -547,7 +544,7 @@ FOLIO_MATCH(compound_head, _head_3);
* understanding of the issues.
*/
struct ptdesc {
- unsigned long __page_flags;
+ memdesc_flags_t pt_flags;
union {
struct rcu_head pt_rcu_head;
@@ -585,7 +582,7 @@ struct ptdesc {
#define TABLE_MATCH(pg, pt) \
static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
-TABLE_MATCH(flags, __page_flags);
+TABLE_MATCH(flags, pt_flags);
TABLE_MATCH(compound_head, pt_list);
TABLE_MATCH(compound_head, _pt_pad_1);
TABLE_MATCH(mapping, __page_mapping);
@@ -627,7 +624,7 @@ static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
atomic_dec(&ptdesc->pt_share_count);
}
-static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+static inline int ptdesc_pmd_pts_count(const struct ptdesc *ptdesc)
{
return atomic_read(&ptdesc->pt_share_count);
}
@@ -660,7 +657,7 @@ static inline void set_page_private(struct page *page, unsigned long private)
page->private = private;
}
-static inline void *folio_get_private(struct folio *folio)
+static inline void *folio_get_private(const struct folio *folio)
{
return folio->private;
}
@@ -785,13 +782,14 @@ struct pfnmap_track_ctx {
*/
struct vm_area_desc {
/* Immutable state. */
- struct mm_struct *mm;
+ const struct mm_struct *const mm;
+ struct file *const file; /* May vary from vm_file in stacked callers. */
unsigned long start;
unsigned long end;
/* Mutable fields. Populated with initial state. */
pgoff_t pgoff;
- struct file *file;
+ struct file *vm_file;
vm_flags_t vm_flags;
pgprot_t page_prot;
@@ -932,6 +930,15 @@ struct mm_cid {
};
#endif
+/*
+ * Opaque type representing current mm_struct flag state. Must be accessed via
+ * mm_flags_xxx() helper functions.
+ */
+#define NUM_MM_FLAG_BITS (64)
+typedef struct {
+ DECLARE_BITMAP(__mm_flags, NUM_MM_FLAG_BITS);
+} __private mm_flags_t;
+
struct kioctx_table;
struct iommu_mm_data;
struct mm_struct {
@@ -1031,10 +1038,10 @@ struct mm_struct {
* counters
*/
/*
- * With some kernel config, the current mmap_lock's offset
- * inside 'mm_struct' is at 0x120, which is very optimal, as
+ * Typically the current mmap_lock's offset is 56 bytes from
+ * the last cacheline boundary, which is very optimal, as
* its two hot fields 'count' and 'owner' sit in 2 different
- * cachelines, and when mmap_lock is highly contended, both
+ * cachelines, and when mmap_lock is highly contended, both
* of the 2 fields will be accessed frequently, current layout
* will help to reduce cache bouncing.
*
@@ -1119,7 +1126,7 @@ struct mm_struct {
/* Architecture-specific MM context */
mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access */
+ mm_flags_t flags; /* Must use mm_flags_* helpers to access */
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
@@ -1229,6 +1236,40 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+/* Set the first system word of mm flags, non-atomically. */
+static inline void __mm_flags_set_word(struct mm_struct *mm, unsigned long value)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
+
+ bitmap_copy(bitmap, &value, BITS_PER_LONG);
+}
+
+/* Obtain a read-only view of the bitmap. */
+static inline const unsigned long *__mm_flags_get_bitmap(const struct mm_struct *mm)
+{
+ return (const unsigned long *)ACCESS_PRIVATE(&mm->flags, __mm_flags);
+}
+
+/* Read the first system word of mm flags, non-atomically. */
+static inline unsigned long __mm_flags_get_word(const struct mm_struct *mm)
+{
+ const unsigned long *bitmap = __mm_flags_get_bitmap(mm);
+
+ return bitmap_read(bitmap, 0, BITS_PER_LONG);
+}
+
+/*
+ * Update the first system word of mm flags ONLY, applying the specified mask to
+ * it, then setting all flags specified by bits.
+ */
+static inline void __mm_flags_set_mask_bits_word(struct mm_struct *mm,
+ unsigned long mask, unsigned long bits)
+{
+ unsigned long *bitmap = ACCESS_PRIVATE(&mm->flags, __mm_flags);
+
+ set_mask_bits(bitmap, mask, bits);
+}
+
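/*
 * A hedged sketch of the intended fork-time combination of these word
 * helpers with mmf_init_legacy_flags() below (the actual call site is not
 * part of this diff):
 *
 *	__mm_flags_set_word(mm,
 *			    mmf_init_legacy_flags(__mm_flags_get_word(oldmm)));
 */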
#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | \
MT_FLAGS_USE_RCU)
extern struct mm_struct init_mm;
@@ -1729,7 +1770,7 @@ enum {
* the modes are SUID_DUMP_* defined in linux/sched/coredump.h
*/
#define MMF_DUMPABLE_BITS 2
-#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
+#define MMF_DUMPABLE_MASK (BIT(MMF_DUMPABLE_BITS) - 1)
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
#define MMF_DUMP_ANON_SHARED 3
@@ -1744,13 +1785,13 @@ enum {
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 9
#define MMF_DUMP_FILTER_MASK \
- (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+ ((BIT(MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
- ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
- (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+ (BIT(MMF_DUMP_ANON_PRIVATE) | BIT(MMF_DUMP_ANON_SHARED) | \
+ BIT(MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
-# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
+# define MMF_DUMP_MASK_DEFAULT_ELF BIT(MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
@@ -1758,19 +1799,16 @@ enum {
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */
-/*
- * This one-shot flag is dropped due to necessity of changing exe once again
- * on NFS restore
- */
-//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+#define MMF_HUGE_ZERO_FOLIO 18 /* mm has ever used the global huge zero folio */
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
-#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
-#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
+#define MMF_DISABLE_THP_EXCEPT_ADVISED 23 /* no THP except when advised (e.g., VM_HUGEPAGE) */
+#define MMF_DISABLE_THP_COMPLETELY 24 /* no THP for all VMAs */
+#define MMF_DISABLE_THP_MASK (BIT(MMF_DISABLE_THP_COMPLETELY) | \
+ BIT(MMF_DISABLE_THP_EXCEPT_ADVISED))
#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
/*
@@ -1783,27 +1821,33 @@ enum {
#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
#define MMF_HAS_MDWE 28
-#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
-
+#define MMF_HAS_MDWE_MASK BIT(MMF_HAS_MDWE)
#define MMF_HAS_MDWE_NO_INHERIT 29
#define MMF_VM_MERGE_ANY 30
-#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY)
+#define MMF_VM_MERGE_ANY_MASK BIT(MMF_VM_MERGE_ANY)
#define MMF_TOPDOWN 31 /* mm searches top down by default */
-#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN)
+#define MMF_TOPDOWN_MASK BIT(MMF_TOPDOWN)
-#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
+#define MMF_INIT_LEGACY_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK)
-static inline unsigned long mmf_init_flags(unsigned long flags)
+/* Legacy flags must fit within 32 bits. */
+static_assert((u64)MMF_INIT_LEGACY_MASK <= (u64)UINT_MAX);
+
+/*
+ * Initialise legacy flags according to masks, propagating selected flags on
+ * fork. Further flag manipulation can be performed by the caller.
+ */
+static inline unsigned long mmf_init_legacy_flags(unsigned long flags)
{
if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
flags &= ~((1UL << MMF_HAS_MDWE) |
(1UL << MMF_HAS_MDWE_NO_INHERIT));
- return flags & MMF_INIT_MASK;
+ return flags & MMF_INIT_LEGACY_MASK;
}
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mman.h b/include/linux/mman.h
index de9e8e6229a4..0ba8a7e8b90a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -201,7 +201,7 @@ static inline bool arch_memory_deny_write_exec_supported(void)
static inline bool map_deny_write_exec(unsigned long old, unsigned long new)
{
/* If MDWE is disabled, we have nothing to deny. */
- if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
+ if (!mm_flags_test(MMF_HAS_MDWE, current->mm))
return false;
/* If the new VMA is not executable, we have nothing to deny. */
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 11a078de9150..2c9fffa58714 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -148,91 +148,6 @@ static inline void vma_refcount_put(struct vm_area_struct *vma)
}
/*
- * Try to read-lock a vma. The function is allowed to occasionally yield false
- * locked result to avoid performance overhead, in which case we fall back to
- * using mmap_lock. The function should never yield false unlocked result.
- * False locked result is possible if mm_lock_seq overflows or if vma gets
- * reused and attached to a different mm before we lock it.
- * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got
- * detached.
- *
- * WARNING! The vma passed to this function cannot be used if the function
- * fails to lock it because in certain cases RCU lock is dropped and then
- * reacquired. Once RCU lock is dropped the vma can be concurently freed.
- */
-static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
- int oldcnt;
-
- /*
- * Check before locking. A race might cause false locked result.
- * We can use READ_ONCE() for the mm_lock_seq here, and don't need
- * ACQUIRE semantics, because this is just a lockless check whose result
- * we don't rely on for anything - the mm_lock_seq read against which we
- * need ordering is below.
- */
- if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence))
- return NULL;
-
- /*
- * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited_acquire()
- * will fail because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET.
- * Acquire fence is required here to avoid reordering against later
- * vm_lock_seq check and checks inside lock_vma_under_rcu().
- */
- if (unlikely(!__refcount_inc_not_zero_limited_acquire(&vma->vm_refcnt, &oldcnt,
- VMA_REF_LIMIT))) {
- /* return EAGAIN if vma got detached from under us */
- return oldcnt ? NULL : ERR_PTR(-EAGAIN);
- }
-
- rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
-
- /*
- * If vma got attached to another mm from under us, that mm is not
- * stable and can be freed in the narrow window after vma->vm_refcnt
- * is dropped and before rcuwait_wake_up(mm) is called. Grab it before
- * releasing vma->vm_refcnt.
- */
- if (unlikely(vma->vm_mm != mm)) {
- /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */
- struct mm_struct *other_mm = vma->vm_mm;
-
- /*
- * __mmdrop() is a heavy operation and we don't need RCU
- * protection here. Release RCU lock during these operations.
- * We reinstate the RCU read lock as the caller expects it to
- * be held when this function returns even on error.
- */
- rcu_read_unlock();
- mmgrab(other_mm);
- vma_refcount_put(vma);
- mmdrop(other_mm);
- rcu_read_lock();
- return NULL;
- }
-
- /*
- * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result.
- * False unlocked result is impossible because we modify and check
- * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq
- * modification invalidates all existing locks.
- *
- * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
- * racing with vma_end_write_all(), we only start reading from the VMA
- * after it has been unlocked.
- * This pairs with RELEASE semantics in vma_end_write_all().
- */
- if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) {
- vma_refcount_put(vma);
- return NULL;
- }
-
- return vma;
-}
-
-/*
* Use only while holding mmap read lock which guarantees that locking will not
* fail (nobody can concurrently write-lock the vma). vma_start_read() should
* not be used in such cases because it might fail due to mm_lock_seq overflow.
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index fe3d6d98f8da..673cbdf43453 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -77,7 +77,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43439 0xa9af
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_43751 0xaae7
-#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8
+#define SDIO_DEVICE_ID_BROADCOM_43752 0xaae8
#define SDIO_VENDOR_ID_CYPRESS 0x04b4
#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0c5da9141983..7fb7331c5725 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -234,7 +234,21 @@ enum node_stat_item {
#endif
#ifdef CONFIG_NUMA_BALANCING
PGPROMOTE_SUCCESS, /* promote successfully */
- PGPROMOTE_CANDIDATE, /* candidate pages to promote */
+ /**
+ * Candidate pages for promotion based on hint fault latency. This
+ * counter is used to control the promotion rate and adjust the hot
+ * threshold.
+ */
+ PGPROMOTE_CANDIDATE,
+ /**
+ * Not rate-limited (NRL) candidate pages: pages that can be promoted
+ * without consulting the hot threshold because the fast-tier node has
+ * enough free pages. These promotions bypass the regular hotness
+ * checks and do NOT influence the promotion rate-limiter or
+ * threshold-adjustment logic.
+ * This counter exists for statistics/monitoring purposes.
+ */
+ PGPROMOTE_CANDIDATE_NRL,
#endif
/* PGDEMOTE_*: pages demoted */
PGDEMOTE_KSWAPD,
@@ -245,6 +259,7 @@ enum node_stat_item {
NR_HUGETLB,
#endif
NR_BALLOON_PAGES,
+ NR_KERNEL_FILE_PAGES,
NR_VM_NODE_STAT_ITEMS
};
@@ -1089,7 +1104,7 @@ static inline unsigned long promo_wmark_pages(const struct zone *z)
return wmark_pages(z, WMARK_PROMO);
}
-static inline unsigned long zone_managed_pages(struct zone *zone)
+static inline unsigned long zone_managed_pages(const struct zone *zone)
{
return (unsigned long)atomic_long_read(&zone->managed_pages);
}
@@ -1113,12 +1128,12 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
-static inline bool zone_is_initialized(struct zone *zone)
+static inline bool zone_is_initialized(const struct zone *zone)
{
return zone->initialized;
}
-static inline bool zone_is_empty(struct zone *zone)
+static inline bool zone_is_empty(const struct zone *zone)
{
return zone->spanned_pages == 0;
}
@@ -1169,26 +1184,31 @@ static inline bool zone_is_empty(struct zone *zone)
#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+static inline enum zone_type memdesc_zonenum(memdesc_flags_t flags)
+{
+ ASSERT_EXCLUSIVE_BITS(flags.f, ZONES_MASK << ZONES_PGSHIFT);
+ return (flags.f >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
static inline enum zone_type page_zonenum(const struct page *page)
{
- ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ return memdesc_zonenum(page->flags);
}
static inline enum zone_type folio_zonenum(const struct folio *folio)
{
- return page_zonenum(&folio->page);
+ return memdesc_zonenum(folio->flags);
}
#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_zone_device_page(const struct page *page)
+static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
- return page_zonenum(page) == ZONE_DEVICE;
+ return memdesc_zonenum(mdf) == ZONE_DEVICE;
}
static inline struct dev_pagemap *page_pgmap(const struct page *page)
{
- VM_WARN_ON_ONCE_PAGE(!is_zone_device_page(page), page);
+ VM_WARN_ON_ONCE_PAGE(!memdesc_is_zone_device(page->flags), page);
return page_folio(page)->pgmap;
}
@@ -1203,9 +1223,9 @@ static inline struct dev_pagemap *page_pgmap(const struct page *page)
static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
const struct page *b)
{
- if (is_zone_device_page(a) != is_zone_device_page(b))
+ if (memdesc_is_zone_device(a->flags) != memdesc_is_zone_device(b->flags))
return false;
- if (!is_zone_device_page(a))
+ if (!memdesc_is_zone_device(a->flags))
return true;
return page_pgmap(a) == page_pgmap(b);
}
@@ -1213,7 +1233,7 @@ static inline bool zone_device_pages_have_same_pgmap(const struct page *a,
extern void memmap_init_zone_device(struct zone *, unsigned long,
unsigned long, struct dev_pagemap *);
#else
-static inline bool is_zone_device_page(const struct page *page)
+static inline bool memdesc_is_zone_device(memdesc_flags_t mdf)
{
return false;
}
@@ -1228,9 +1248,14 @@ static inline struct dev_pagemap *page_pgmap(const struct page *page)
}
#endif
+static inline bool is_zone_device_page(const struct page *page)
+{
+ return memdesc_is_zone_device(page->flags);
+}
+
static inline bool folio_is_zone_device(const struct folio *folio)
{
- return is_zone_device_page(&folio->page);
+ return memdesc_is_zone_device(folio->flags);
}
static inline bool is_zone_movable_page(const struct page *page)
@@ -1248,7 +1273,7 @@ static inline bool folio_is_zone_movable(const struct folio *folio)
* Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
* intersection with the given zone
*/
-static inline bool zone_intersects(struct zone *zone,
+static inline bool zone_intersects(const struct zone *zone,
unsigned long start_pfn, unsigned long nr_pages)
{
if (zone_is_empty(zone))
@@ -1415,7 +1440,7 @@ typedef struct pglist_data {
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
- int kswapd_failures; /* Number of 'reclaimed == 0' runs */
+ atomic_t kswapd_failures; /* Number of 'reclaimed == 0' runs */
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
@@ -1556,12 +1581,12 @@ static inline int local_memory_node(int node_id) { return node_id; };
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
#ifdef CONFIG_ZONE_DEVICE
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return zone_idx(zone) == ZONE_DEVICE;
}
#else
-static inline bool zone_is_zone_device(struct zone *zone)
+static inline bool zone_is_zone_device(const struct zone *zone)
{
return false;
}
@@ -1573,19 +1598,19 @@ static inline bool zone_is_zone_device(struct zone *zone)
* populated_zone(). If the whole zone is reserved then we can easily
* end up with populated_zone() && !managed_zone().
*/
-static inline bool managed_zone(struct zone *zone)
+static inline bool managed_zone(const struct zone *zone)
{
return zone_managed_pages(zone);
}
/* Returns true if a zone has memory */
-static inline bool populated_zone(struct zone *zone)
+static inline bool populated_zone(const struct zone *zone)
{
return zone->present_pages;
}
#ifdef CONFIG_NUMA
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return zone->node;
}
@@ -1595,7 +1620,7 @@ static inline void zone_set_nid(struct zone *zone, int nid)
zone->node = nid;
}
#else
-static inline int zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(const struct zone *zone)
{
return 0;
}
@@ -1622,7 +1647,7 @@ static inline int is_highmem_idx(enum zone_type idx)
* @zone: pointer to struct zone variable
* Return: 1 for a highmem zone, 0 otherwise
*/
-static inline int is_highmem(struct zone *zone)
+static inline int is_highmem(const struct zone *zone)
{
return is_highmem_idx(zone_idx(zone));
}
@@ -1688,12 +1713,12 @@ static inline struct zone *zonelist_zone(struct zoneref *zoneref)
return zoneref->zone;
}
-static inline int zonelist_zone_idx(struct zoneref *zoneref)
+static inline int zonelist_zone_idx(const struct zoneref *zoneref)
{
return zoneref->zone_idx;
}
-static inline int zonelist_node_idx(struct zoneref *zoneref)
+static inline int zonelist_node_idx(const struct zoneref *zoneref)
{
return zone_to_nid(zoneref->zone);
}
@@ -1996,7 +2021,7 @@ static inline struct page *__section_mem_map_addr(struct mem_section *section)
return (struct page *)map;
}
-static inline int present_section(struct mem_section *section)
+static inline int present_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}
@@ -2006,12 +2031,12 @@ static inline int present_section_nr(unsigned long nr)
return present_section(__nr_to_section(nr));
}
-static inline int valid_section(struct mem_section *section)
+static inline int valid_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}
-static inline int early_section(struct mem_section *section)
+static inline int early_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_EARLY));
}
@@ -2021,27 +2046,27 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
-static inline int online_section(struct mem_section *section)
+static inline int online_section(const struct mem_section *section)
{
return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}
#ifdef CONFIG_ZONE_DEVICE
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE;
return section && ((section->section_mem_map & flags) == flags);
}
#else
-static inline int online_device_section(struct mem_section *section)
+static inline int online_device_section(const struct mem_section *section)
{
return 0;
}
#endif
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return (section &&
(section->section_mem_map & SECTION_IS_VMEMMAP_PREINIT));
@@ -2051,7 +2076,7 @@ void sparse_vmemmap_init_nid_early(int nid);
void sparse_vmemmap_init_nid_late(int nid);
#else
-static inline int preinited_vmemmap_section(struct mem_section *section)
+static inline int preinited_vmemmap_section(const struct mem_section *section)
{
return 0;
}
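
The conversions above lean on the memdesc_flags_t wrapper introduced elsewhere
in this series: the raw flags word is boxed in a struct and accessed as
flags.f, so the same helpers serve both page->flags and folio->flags. A minimal
sketch of the assumed shape (the authoritative definition lives outside this
hunk):

/* Sketch only; assumes the wrapper is a plain one-member struct. */
typedef struct {
	unsigned long f;	/* the raw flags word, read as flags.f above */
} memdesc_flags_t;

With that shape, page_zonenum() and folio_zonenum() both collapse into
memdesc_zonenum() on their respective flags words, as the hunks above show.
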
diff --git a/include/linux/module.h b/include/linux/module.h
index 3319a5269d28..e135cc79acee 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -244,14 +244,22 @@ struct module_kobject *lookup_or_create_module_kobject(const char *name);
/* What your module does. */
#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
-#ifdef MODULE
+/*
+ * Format: __mod_device_table__kmod_<modname>__<type>__<name>
+ * The substrings `__kmod_` and `__` are used as delimiters when file2alias.c
+ * parses the symbol name.
+ */
+#define __mod_device_table(type, name) \
+ __PASTE(__mod_device_table__, \
+ __PASTE(__KBUILD_MODNAME, \
+ __PASTE(__, \
+ __PASTE(type, \
+ __PASTE(__, name)))))
+
/* Creates an alias so file2alias.c can find device table. */
#define MODULE_DEVICE_TABLE(type, name) \
-static typeof(name) __mod_device_table__##type##__##name \
+static typeof(name) __mod_device_table(type, name) \
__attribute__ ((used, alias(__stringify(name))))
-#else /* !MODULE */
-#define MODULE_DEVICE_TABLE(type, name)
-#endif
/* Version of form [<epoch>:]<version>[-<extra-version>].
* Or for CVS/RCS ID version, everything but the number is stripped.
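
Given the format comment above, a hypothetical driver built as foo.ko (the
driver and table names are assumptions) would have its table alias expand
roughly as follows:

/* Hypothetical driver table; "foo" stands in for the module name. */
static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

/* ...which, with __KBUILD_MODNAME expanding to kmod_foo, emits a symbol
 * file2alias.c can split on "__kmod_" and "__": */
static typeof(foo_of_match) __mod_device_table__kmod_foo__of__foo_of_match
	__attribute__((used, alias("foo_of_match")));
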
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 3a25122d83e2..6907aedc4f74 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -349,6 +349,19 @@ static inline void kernel_param_unlock(struct module *mod)
__module_param_call("", name, &param_ops_##type, &var, perm, \
-1, KERNEL_PARAM_FL_UNSAFE)
+/**
+ * __core_param_cb - similar to core_param(), but takes set/get ops instead of a type.
+ * @name: the name of the cmdline and sysfs parameter (often the same as the variable)
+ * @ops: the set & get operations for this parameter.
+ * @arg: the argument passed to @ops, typically a pointer to the variable
+ * @perm: visibility in sysfs
+ *
+ * Ideally this would be called 'core_param_cb', but that name has already
+ * been used for module core parameters, hence the '__' prefix.
+ */
+#define __core_param_cb(name, ops, arg, perm) \
+ __module_param_call("", name, ops, arg, perm, -1, 0)
+
#endif /* !MODULE */
/**
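
A hedged usage sketch for the new macro; all foo_* names are hypothetical, and
built-in (non-modular) code is assumed since this lives in the !MODULE branch:

static unsigned int foo_level;

static int foo_level_set(const char *val, const struct kernel_param *kp)
{
	/* Extra validation could be layered on top of the stock setter. */
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops foo_level_ops = {
	.set	= foo_level_set,
	.get	= param_get_uint,
};

__core_param_cb(foo_level, &foo_level_ops, &foo_level, 0644);
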
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5f9c053b0897..acfe7ef86a1b 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -33,7 +33,6 @@ enum mount_flags {
MNT_NOSYMFOLLOW = 0x80,
MNT_SHRINKABLE = 0x100,
- MNT_WRITE_HOLD = 0x200,
MNT_INTERNAL = 0x4000,
@@ -52,7 +51,7 @@ enum mount_flags {
| MNT_READONLY | MNT_NOSYMFOLLOW,
MNT_ATIME_MASK = MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME,
- MNT_INTERNAL_FLAGS = MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED |
+ MNT_INTERNAL_FLAGS = MNT_INTERNAL | MNT_DOOMED |
MNT_SYNC_UMOUNT | MNT_LOCKED
};
@@ -77,7 +76,7 @@ extern void mntput(struct vfsmount *mnt);
extern struct vfsmount *mntget(struct vfsmount *mnt);
extern void mnt_make_shortterm(struct vfsmount *mnt);
extern struct vfsmount *mnt_clone_internal(const struct path *path);
-extern bool __mnt_is_readonly(struct vfsmount *mnt);
+extern bool __mnt_is_readonly(const struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
@@ -104,8 +103,8 @@ extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
int do_mount(const char *, const char __user *,
const char *, unsigned long, void *);
-extern struct path *collect_paths(const struct path *, struct path *, unsigned);
-extern void drop_collected_paths(struct path *, struct path *);
+extern const struct path *collect_paths(const struct path *, struct path *, unsigned);
+extern void drop_collected_paths(const struct path *, const struct path *);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
extern int cifs_root_data(char **dev, char **opts);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index a7800ef04e76..fed86221c69c 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -61,10 +61,10 @@ struct dentry *kern_path_parent(const char *name, struct path *parent);
extern struct dentry *start_creating_path(int, const char *, struct path *, unsigned int);
extern struct dentry *start_creating_user_path(int, const char __user *, struct path *, unsigned int);
-extern void end_creating_path(struct path *, struct dentry *);
+extern void end_creating_path(const struct path *, struct dentry *);
extern struct dentry *start_removing_path(const char *, struct path *);
extern struct dentry *start_removing_user_path_at(int , const char __user *, struct path *);
-static inline void end_removing_path(struct path *path , struct dentry *dentry)
+static inline void end_removing_path(const struct path *path , struct dentry *dentry)
{
end_creating_path(path, dentry);
}
diff --git a/include/linux/net/intel/libie/adminq.h b/include/linux/net/intel/libie/adminq.h
index 012b5d499c1a..ab13bd777a28 100644
--- a/include/linux/net/intel/libie/adminq.h
+++ b/include/linux/net/intel/libie/adminq.h
@@ -9,6 +9,7 @@
#define LIBIE_CHECK_STRUCT_LEN(n, X) \
static_assert((n) == sizeof(struct X))
+#define LIBIE_AQ_MAX_BUF_LEN 4096
/**
* struct libie_aqc_generic - Generic structure used in adminq communication
@@ -192,8 +193,9 @@ LIBIE_CHECK_STRUCT_LEN(16, libie_aqc_list_caps);
#define LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
#define LIBIE_AQC_CAPS_NAC_TOPOLOGY 0x0087
#define LIBIE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
-#define LIBIE_AQC_BIT_ROCEV2_LAG 0x01
-#define LIBIE_AQC_BIT_SRIOV_LAG 0x02
+#define LIBIE_AQC_BIT_ROCEV2_LAG BIT(0)
+#define LIBIE_AQC_BIT_SRIOV_LAG BIT(1)
+#define LIBIE_AQC_BIT_SRIOV_AA_LAG BIT(2)
#define LIBIE_AQC_CAPS_FLEX10 0x00F1
#define LIBIE_AQC_CAPS_CEM 0x00F2
@@ -221,6 +223,94 @@ struct libie_aqc_list_caps_elem {
};
LIBIE_CHECK_STRUCT_LEN(32, libie_aqc_list_caps_elem);
+/* Admin Queue command opcodes */
+enum libie_adminq_opc {
+ /* FW Logging Commands */
+ libie_aqc_opc_fw_logs_config = 0xFF30,
+ libie_aqc_opc_fw_logs_register = 0xFF31,
+ libie_aqc_opc_fw_logs_query = 0xFF32,
+ libie_aqc_opc_fw_logs_event = 0xFF33,
+};
+
+enum libie_aqc_fw_logging_mod {
+ LIBIE_AQC_FW_LOG_ID_GENERAL = 0,
+ LIBIE_AQC_FW_LOG_ID_CTRL,
+ LIBIE_AQC_FW_LOG_ID_LINK,
+ LIBIE_AQC_FW_LOG_ID_LINK_TOPO,
+ LIBIE_AQC_FW_LOG_ID_DNL,
+ LIBIE_AQC_FW_LOG_ID_I2C,
+ LIBIE_AQC_FW_LOG_ID_SDP,
+ LIBIE_AQC_FW_LOG_ID_MDIO,
+ LIBIE_AQC_FW_LOG_ID_ADMINQ,
+ LIBIE_AQC_FW_LOG_ID_HDMA,
+ LIBIE_AQC_FW_LOG_ID_LLDP,
+ LIBIE_AQC_FW_LOG_ID_DCBX,
+ LIBIE_AQC_FW_LOG_ID_DCB,
+ LIBIE_AQC_FW_LOG_ID_XLR,
+ LIBIE_AQC_FW_LOG_ID_NVM,
+ LIBIE_AQC_FW_LOG_ID_AUTH,
+ LIBIE_AQC_FW_LOG_ID_VPD,
+ LIBIE_AQC_FW_LOG_ID_IOSF,
+ LIBIE_AQC_FW_LOG_ID_PARSER,
+ LIBIE_AQC_FW_LOG_ID_SW,
+ LIBIE_AQC_FW_LOG_ID_SCHEDULER,
+ LIBIE_AQC_FW_LOG_ID_TXQ,
+ LIBIE_AQC_FW_LOG_ID_RSVD,
+ LIBIE_AQC_FW_LOG_ID_POST,
+ LIBIE_AQC_FW_LOG_ID_WATCHDOG,
+ LIBIE_AQC_FW_LOG_ID_TASK_DISPATCH,
+ LIBIE_AQC_FW_LOG_ID_MNG,
+ LIBIE_AQC_FW_LOG_ID_SYNCE,
+ LIBIE_AQC_FW_LOG_ID_HEALTH,
+ LIBIE_AQC_FW_LOG_ID_TSDRV,
+ LIBIE_AQC_FW_LOG_ID_PFREG,
+ LIBIE_AQC_FW_LOG_ID_MDLVER,
+ LIBIE_AQC_FW_LOG_ID_MAX
+};
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ */
+#define LIBIE_AQC_FW_LOG_CONF_UART_EN BIT(0)
+#define LIBIE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
+#define LIBIE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
+#define LIBIE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
+#define LIBIE_AQC_FW_LOG_AQ_REGISTER BIT(0)
+#define LIBIE_AQC_FW_LOG_AQ_QUERY BIT(2)
+
+#define LIBIE_AQC_FW_LOG_MIN_RESOLUTION 1
+#define LIBIE_AQC_FW_LOG_MAX_RESOLUTION 128
+
+struct libie_aqc_fw_log {
+ u8 cmd_flags;
+
+ u8 rsp_flag;
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct libie_aqc_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
/**
* struct libie_aq_desc - Admin Queue (AQ) descriptor
* @flags: LIBIE_AQ_FLAG_* flags
@@ -252,6 +342,7 @@ struct libie_aq_desc {
struct libie_aqc_driver_ver driver_ver;
struct libie_aqc_req_res res_owner;
struct libie_aqc_list_caps get_cap;
+ struct libie_aqc_fw_log fw_log;
} params;
};
LIBIE_CHECK_STRUCT_LEN(32, libie_aq_desc);
diff --git a/include/linux/net/intel/libie/fwlog.h b/include/linux/net/intel/libie/fwlog.h
new file mode 100644
index 000000000000..36b13fabca9e
--- /dev/null
+++ b/include/linux/net/intel/libie/fwlog.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _LIBIE_FWLOG_H_
+#define _LIBIE_FWLOG_H_
+
+#include <linux/net/intel/libie/adminq.h>
+
+/* Only a single log level should be set; all log levels below the set value
+ * are enabled, e.g. if the log level is set to LIBIE_FWLOG_LEVEL_VERBOSE, then
+ * all other log levels are included (except LIBIE_FWLOG_LEVEL_NONE)
+ */
+enum libie_fwlog_level {
+ LIBIE_FWLOG_LEVEL_NONE = 0,
+ LIBIE_FWLOG_LEVEL_ERROR = 1,
+ LIBIE_FWLOG_LEVEL_WARNING = 2,
+ LIBIE_FWLOG_LEVEL_NORMAL = 3,
+ LIBIE_FWLOG_LEVEL_VERBOSE = 4,
+ LIBIE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
+
+struct libie_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct libie_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct libie_fwlog_module_entry module_entries[LIBIE_AQC_FW_LOG_ID_MAX];
+ /* options used to configure firmware logging */
+ u16 options;
+#define LIBIE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define LIBIE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling libie_fwlog_init() so the PF registers for
+ * firmware logging on initialization
+ */
+#define LIBIE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the libie_aq_fwlog_get() response if the PF is registered for
+ * FW logging events over ARQ
+ */
+#define LIBIE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u16 log_resolution;
+};
+
+struct libie_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct libie_fwlog_ring {
+ struct libie_fwlog_data *rings;
+ u16 index;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define LIBIE_FWLOG_RING_SIZE_INDEX_DFLT 3
+#define LIBIE_FWLOG_RING_SIZE_DFLT 256
+#define LIBIE_FWLOG_RING_SIZE_MAX 512
+
+struct libie_fwlog {
+ struct libie_fwlog_cfg cfg;
+ bool supported; /* does hardware support FW logging? */
+ struct libie_fwlog_ring ring;
+ struct dentry *debugfs;
+	/* keep track of all the dentries for FW log modules */
+ struct dentry **debugfs_modules;
+ struct_group_tagged(libie_fwlog_api, api,
+ struct pci_dev *pdev;
+ int (*send_cmd)(void *, struct libie_aq_desc *, void *, u16);
+ void *priv;
+ struct dentry *debugfs_root;
+ );
+};
+
+int libie_fwlog_init(struct libie_fwlog *fwlog, struct libie_fwlog_api *api);
+void libie_fwlog_deinit(struct libie_fwlog *fwlog);
+void libie_fwlog_reregister(struct libie_fwlog *fwlog);
+void libie_get_fwlog_data(struct libie_fwlog *fwlog, u8 *buf, u16 len);
+#endif /* _LIBIE_FWLOG_H_ */
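
A hedged sketch of how a driver might wire up this API; everything prefixed
foo_ is hypothetical scaffolding, only the libie_* types and calls come from
this header:

struct foo_adapter {
	struct pci_dev *pdev;
	struct dentry *debugfs_root;
	struct libie_fwlog fwlog;
};

static int foo_fwlog_send_cmd(void *priv, struct libie_aq_desc *desc,
			      void *buf, u16 size)
{
	/* Forward the descriptor to the driver's admin queue; elided. */
	return 0;
}

static int foo_fwlog_setup(struct foo_adapter *adapter)
{
	struct libie_fwlog_api api = {
		.pdev		= adapter->pdev,
		.send_cmd	= foo_fwlog_send_cmd,
		.priv		= adapter,
		.debugfs_root	= adapter->debugfs_root,
	};

	return libie_fwlog_init(&adapter->fwlog, &api);
}
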
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f3a3b761abfb..d1a687444b27 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1906,6 +1906,7 @@ enum netdev_reg_state {
* device struct
* @mpls_ptr: mpls_dev struct pointer
* @mctp_ptr: MCTP specific data
+ * @psp_dev: PSP crypto device registered for this netdev
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -2310,6 +2311,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_MCTP)
struct mctp_dev __rcu *mctp_ptr;
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_dev __rcu *psp_dev;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -3459,6 +3463,32 @@ static inline bool dev_has_header(const struct net_device *dev)
return dev->header_ops && dev->header_ops->create;
}
+struct numa_drop_counters {
+ atomic_t drops0 ____cacheline_aligned_in_smp;
+ atomic_t drops1 ____cacheline_aligned_in_smp;
+};
+
+static inline int numa_drop_read(const struct numa_drop_counters *ndc)
+{
+ return atomic_read(&ndc->drops0) + atomic_read(&ndc->drops1);
+}
+
+static inline void numa_drop_add(struct numa_drop_counters *ndc, int val)
+{
+ int n = numa_node_id() % 2;
+
+ if (n)
+ atomic_add(val, &ndc->drops1);
+ else
+ atomic_add(val, &ndc->drops0);
+}
+
+static inline void numa_drop_reset(struct numa_drop_counters *ndc)
+{
+ atomic_set(&ndc->drops0, 0);
+ atomic_set(&ndc->drops1, 0);
+}
+
/*
* Incoming packets are placed on per-CPU queues
*/
@@ -3504,13 +3534,9 @@ struct softnet_data {
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
- atomic_t dropped ____cacheline_aligned_in_smp;
+ struct numa_drop_counters drop_counters;
- /* Another possibly contended cache line */
- spinlock_t defer_lock ____cacheline_aligned_in_smp;
- int defer_count;
- int defer_ipi_scheduled;
- struct sk_buff *defer_list;
+ int defer_ipi_scheduled ____cacheline_aligned_in_smp;
call_single_data_t defer_csd;
};
@@ -5290,13 +5316,18 @@ void skb_warn_bad_offload(const struct sk_buff *skb);
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
- netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
+ netdev_features_t feature;
+
+ if (gso_type & (SKB_GSO_TCP_FIXEDID | SKB_GSO_TCP_FIXEDID_INNER))
+ gso_type |= __SKB_GSO_TCP_FIXEDID;
+
+ feature = ((netdev_features_t)gso_type << NETIF_F_GSO_SHIFT) & NETIF_F_GSO_MASK;
/* check flags correspondence */
BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
- BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(__SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
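
The numa_drop_counters helpers added above split one contended drop counter
into two cacheline-aligned halves selected by NUMA-node parity, so writers on
different nodes dirty different cache lines while readers simply sum both. A
minimal sketch of the intended pattern (the foo_* wrappers are hypothetical):

static struct numa_drop_counters foo_drops;

static void foo_count_drop(void)
{
	numa_drop_add(&foo_drops, 1);	/* lands in drops0 or drops1 */
}

static int foo_total_drops(void)
{
	return numa_drop_read(&foo_drops);	/* always sums both halves */
}
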
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 98c96d649bf9..72ee7d210a74 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -21,7 +21,7 @@
#include <linux/rolling_buffer.h>
enum netfs_sreq_ref_trace;
-typedef struct mempool_s mempool_t;
+typedef struct mempool mempool_t;
struct folio_queue;
/**
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index b5ea9882eda8..f22eec466040 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -55,7 +55,6 @@ struct netpoll_info {
struct delayed_work tx_work;
- struct netpoll *netpoll;
struct rcu_head rcu;
};
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 9aed39abc94b..afe1d8f09d89 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -122,8 +122,6 @@ struct nfs_pageio_descriptor {
/* arbitrarily selected limit to number of mirrors */
#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
-#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
-
extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
struct page *page,
unsigned int pgbase,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ac4bff6e9913..d56583572c98 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -862,7 +862,7 @@ struct nfs_getaclres {
size_t acl_len;
size_t acl_data_offset;
int acl_flags;
- struct page * acl_scratch;
+ struct folio * acl_scratch;
};
struct nfs_setattrres {
@@ -1596,7 +1596,7 @@ struct nfs42_listxattrsargs {
struct nfs42_listxattrsres {
struct nfs4_sequence_res seq_res;
- struct page *scratch;
+ struct folio *scratch;
void *xattr_buf;
size_t xattr_len;
u64 cookie;
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
index 5c7c92659e73..7ca2715edccc 100644
--- a/include/linux/nfslocalio.h
+++ b/include/linux/nfslocalio.h
@@ -65,6 +65,8 @@ struct nfsd_localio_operations {
struct net *(*nfsd_file_put_local)(struct nfsd_file __rcu **);
struct nfsd_file *(*nfsd_file_get_local)(struct nfsd_file *);
struct file *(*nfsd_file_file)(struct nfsd_file *);
+ void (*nfsd_file_dio_alignment)(struct nfsd_file *,
+ u32 *, u32 *, u32 *);
} ____cacheline_aligned;
extern void nfsd_localio_ops_init(void);
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 615a560d9edb..f3b13da78aac 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -103,7 +103,7 @@ struct nvmem_cell_info {
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
- * generated with ida_simple_get() and provided id field is ignored.
+ * generated with ida_alloc() and provided id field is ignored.
*
* Note: Specifying name and setting id to -1 implies a unique device
* whose name is provided as-is (kept unaltered).
diff --git a/include/linux/of.h b/include/linux/of.h
index a62154aeda1b..5e2c6ed9370a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -550,6 +550,13 @@ static inline struct device_node *of_get_next_child(
return NULL;
}
+static inline struct device_node *of_get_next_child_with_prefix(
+ const struct device_node *node, struct device_node *prev,
+ const char *prefix)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev)
{
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index a480063c9cb1..1db8543dfc8a 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -55,7 +55,6 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 bus_token);
extern void of_msi_configure(struct device *dev, const struct device_node *np);
extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in);
-u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline void of_irq_init(const struct of_device_id *matches)
{
@@ -105,11 +104,6 @@ static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np,
{
return id_in;
}
-static inline u32 of_msi_map_id(struct device *dev,
- struct device_node *msi_np, u32 id_in)
-{
- return id_in;
-}
#endif
#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
diff --git a/include/linux/once.h b/include/linux/once.h
index 30346fcdc799..449a0e34ad5a 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE(func, ...) \
({ \
bool ___ret = false; \
- static bool __section(".data..once") ___done = false; \
+ static bool __section(".data..do_once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
unsigned long ___flags; \
@@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
#define DO_ONCE_SLEEPABLE(func, ...) \
({ \
bool ___ret = false; \
- static bool __section(".data..once") ___done = false; \
+ static bool __section(".data..do_once") ___done = false; \
static DEFINE_STATIC_KEY_TRUE(___once_key); \
if (static_branch_unlikely(&___once_key)) { \
___ret = __do_once_sleepable_start(&___done); \
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 1e0fc6931ce9..7b02bc1d0a7e 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -91,7 +91,7 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
*/
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
- if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+ if (unlikely(mm_flags_test(MMF_UNSTABLE, mm)))
return VM_FAULT_SIGBUS;
return 0;
}
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 154ed0dbb43f..725f95f7e416 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -239,6 +239,76 @@ static inline bool __must_check __must_check_overflow(bool overflow)
__overflows_type(n, T))
/**
+ * range_overflows() - Check if a range is out of bounds
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * A strict check to determine if the range [@start, @start + @size) is
+ * invalid with respect to the allowable range [0, @max). Any range
+ * starting at or beyond @max is considered an overflow, even if @size is 0.
+ *
+ * Returns: true if the range is out of bounds.
+ */
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ >= max__ || size__ > max__ - start__; \
+})
+
+/**
+ * range_overflows_t() - Check if a range is out of bounds
+ * @type: Data type to use.
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Same as range_overflows() but forcing the parameters to @type.
+ *
+ * Returns: true if the range is out of bounds.
+ */
+#define range_overflows_t(type, start, size, max) \
+ range_overflows((type)(start), (type)(size), (type)(max))
+
+/**
+ * range_end_overflows() - Check if a range's endpoint is out of bounds
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Checks only if the endpoint of a range (@start + @size) exceeds @max.
+ * Unlike range_overflows(), a zero-sized range at the boundary (@start == @max)
+ * is not considered an overflow. Useful for iterator-style checks.
+ *
+ * Returns: true if the endpoint exceeds the boundary.
+ */
+#define range_end_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+/**
+ * range_end_overflows_t() - Check if a range's endpoint is out of bounds
+ * @type: Data type to use.
+ * @start: Start of the range.
+ * @size: Size of the range.
+ * @max: Exclusive upper boundary.
+ *
+ * Same as range_end_overflows() but forcing the parameters to @type.
+ *
+ * Returns: true if the endpoint exceeds the boundary.
+ */
+#define range_end_overflows_t(type, start, size, max) \
+ range_end_overflows((type)(start), (type)(size), (type)(max))
+
+/**
* castable_to_type - like __same_type(), but also allows for casted literals
*
* @n: variable or constant value
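
The difference between the two new families only shows at the boundary; a
worked example with illustrative values:

static void overflow_boundary_examples(void)
{
	bool a = range_overflows(4096u, 0u, 4096u);	/* true:  start >= max */
	bool b = range_end_overflows(4096u, 0u, 4096u);	/* false: end == max is fine */
	bool c = range_end_overflows(4000u, 97u, 4096u);	/* true:  4000 + 97 > 4096 */

	(void)a; (void)b; (void)c;	/* results shown in the comments */
}
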
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 8d3fa3a91ce4..0091ad1986bf 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -217,7 +217,7 @@ static __always_inline const struct page *page_fixed_fake_head(const struct page
* cold cacheline in some cases.
*/
if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags)) {
+ test_bit(PG_head, &page->flags.f)) {
/*
* We can safely access the field of the @page[1] with PG_head
* because the @page is a compound page composed with at least
@@ -316,7 +316,7 @@ static __always_inline unsigned long _compound_head(const struct page *page)
* check that the page number lies within @folio; the caller is presumed
* to have a reference to the page.
*/
-#define folio_page(folio, n) nth_page(&(folio)->page, n)
+#define folio_page(folio, n) (&(folio)->page + (n))
static __always_inline int PageTail(const struct page *page)
{
@@ -325,14 +325,14 @@ static __always_inline int PageTail(const struct page *page)
static __always_inline int PageCompound(const struct page *page)
{
- return test_bit(PG_head, &page->flags) ||
+ return test_bit(PG_head, &page->flags.f) ||
READ_ONCE(page->compound_head) & 1;
}
#define PAGE_POISON_PATTERN -1l
static inline int PagePoisoned(const struct page *page)
{
- return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
+ return READ_ONCE(page->flags.f) == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
@@ -349,8 +349,8 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
const struct page *page = &folio->page;
VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
- VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
- return &page[n].flags;
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
}
static unsigned long *folio_flags(struct folio *folio, unsigned n)
@@ -358,8 +358,8 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
struct page *page = &folio->page;
VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
- VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
- return &page[n].flags;
+ VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags.f), page);
+ return &page[n].flags.f;
}
/*
@@ -449,37 +449,37 @@ FOLIO_CLEAR_FLAG(name, page)
#define TESTPAGEFLAG(uname, lname, policy) \
FOLIO_TEST_FLAG(lname, FOLIO_##policy) \
static __always_inline int Page##uname(const struct page *page) \
-{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
+{ return test_bit(PG_##lname, &policy(page, 0)->flags.f); }
#define SETPAGEFLAG(uname, lname, policy) \
FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void SetPage##uname(struct page *page) \
-{ set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define CLEARPAGEFLAG(uname, lname, policy) \
FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void ClearPage##uname(struct page *page) \
-{ clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __SETPAGEFLAG(uname, lname, policy) \
__FOLIO_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline void __SetPage##uname(struct page *page) \
-{ __set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define __CLEARPAGEFLAG(uname, lname, policy) \
__FOLIO_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline void __ClearPage##uname(struct page *page) \
-{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ __clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTSETFLAG(uname, lname, policy) \
FOLIO_TEST_SET_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestSetPage##uname(struct page *page) \
-{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define TESTCLEARFLAG(uname, lname, policy) \
FOLIO_TEST_CLEAR_FLAG(lname, FOLIO_##policy) \
static __always_inline int TestClearPage##uname(struct page *page) \
-{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }
+{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags.f); }
#define PAGEFLAG(uname, lname, policy) \
TESTPAGEFLAG(uname, lname, policy) \
@@ -618,6 +618,7 @@ FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE)
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif
+#define PhysHighMem(__p) (PageHighMem(phys_to_page(__p)))
/* Does kmap_local_folio() only allow access to one page of the folio? */
#ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
@@ -846,7 +847,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
}
__SETPAGEFLAG(Head, head, PF_ANY)
@@ -1170,28 +1171,28 @@ static __always_inline int PageAnonExclusive(const struct page *page)
*/
if (PageHuge(page))
page = compound_head(page);
- return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void SetPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
- __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
+ __clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}
#ifdef CONFIG_MMU
@@ -1241,7 +1242,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
*/
static inline int folio_has_private(const struct folio *folio)
{
- return !!(folio->flags & PAGE_FLAGS_PRIVATE);
+ return !!(folio->flags.f & PAGE_FLAGS_PRIVATE);
}
#undef PF_ANY
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 6a44be0f39f4..e046278a01fa 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -13,12 +13,11 @@
#include <linux/types.h>
-#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
- PB_migrate,
- PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
- /* 3 bits required for migrate types */
+ PB_migrate_0,
+ PB_migrate_1,
+ PB_migrate_2,
PB_compact_skip,/* If set the block is skipped by compaction */
#ifdef CONFIG_MEMORY_ISOLATION
@@ -37,11 +36,10 @@ enum pageblock_bits {
#define NR_PAGEBLOCK_BITS (roundup_pow_of_two(__NR_PAGEBLOCK_BITS))
-#define MIGRATETYPE_MASK ((1UL << (PB_migrate_end + 1)) - 1)
+#define MIGRATETYPE_MASK (BIT(PB_migrate_0)|BIT(PB_migrate_1)|BIT(PB_migrate_2))
#ifdef CONFIG_MEMORY_ISOLATION
-#define MIGRATETYPE_AND_ISO_MASK \
- (((1UL << (PB_migrate_end + 1)) - 1) | BIT(PB_migrate_isolate))
+#define MIGRATETYPE_AND_ISO_MASK (MIGRATETYPE_MASK | BIT(PB_migrate_isolate))
#else
#define MIGRATETYPE_AND_ISO_MASK MIGRATETYPE_MASK
#endif
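
The rewritten masks are bit-for-bit identical to the old arithmetic forms; a
quick sanity sketch (assuming GENMASK() from linux/bits.h):

/* The three enumerated bits still form the same contiguous 0x7 mask. */
static_assert(MIGRATETYPE_MASK == GENMASK(PB_migrate_2, PB_migrate_0));
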
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 12a12dae727d..09b581c1d878 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -140,7 +140,7 @@ static inline int inode_drain_writes(struct inode *inode)
return filemap_write_and_wait(inode->i_mapping);
}
-static inline bool mapping_empty(struct address_space *mapping)
+static inline bool mapping_empty(const struct address_space *mapping)
{
return xa_empty(&mapping->i_pages);
}
@@ -166,7 +166,7 @@ static inline bool mapping_empty(struct address_space *mapping)
* refcount and the referenced bit, which will be elevated or set in
* the process of adding new cache pages to an inode.
*/
-static inline bool mapping_shrinkable(struct address_space *mapping)
+static inline bool mapping_shrinkable(const struct address_space *mapping)
{
void *head;
@@ -211,6 +211,8 @@ enum mapping_flags {
folio contents */
AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */
AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
+ AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
+ account usage to user cgroups */
/* Bits 16-25 are used for FOLIO_ORDER */
AS_FOLIO_ORDER_BITS = 5,
AS_FOLIO_ORDER_MIN = 16,
@@ -265,7 +267,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
clear_bit(AS_UNEVICTABLE, &mapping->flags);
}
-static inline bool mapping_unevictable(struct address_space *mapping)
+static inline bool mapping_unevictable(const struct address_space *mapping)
{
return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}
@@ -275,7 +277,7 @@ static inline void mapping_set_exiting(struct address_space *mapping)
set_bit(AS_EXITING, &mapping->flags);
}
-static inline int mapping_exiting(struct address_space *mapping)
+static inline int mapping_exiting(const struct address_space *mapping)
{
return test_bit(AS_EXITING, &mapping->flags);
}
@@ -285,7 +287,7 @@ static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
-static inline int mapping_use_writeback_tags(struct address_space *mapping)
+static inline int mapping_use_writeback_tags(const struct address_space *mapping)
{
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
@@ -331,7 +333,7 @@ static inline void mapping_set_inaccessible(struct address_space *mapping)
set_bit(AS_INACCESSIBLE, &mapping->flags);
}
-static inline bool mapping_inaccessible(struct address_space *mapping)
+static inline bool mapping_inaccessible(const struct address_space *mapping)
{
return test_bit(AS_INACCESSIBLE, &mapping->flags);
}
@@ -341,18 +343,18 @@ static inline void mapping_set_writeback_may_deadlock_on_reclaim(struct address_
set_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline bool mapping_writeback_may_deadlock_on_reclaim(struct address_space *mapping)
+static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct address_space *mapping)
{
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
}
/* Restricts the given gfp_mask to what the mapping allows. */
-static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
+static inline gfp_t mapping_gfp_constraint(const struct address_space *mapping,
gfp_t gfp_mask)
{
return mapping_gfp_mask(mapping) & gfp_mask;
@@ -475,11 +477,17 @@ mapping_min_folio_order(const struct address_space *mapping)
}
static inline unsigned long
-mapping_min_folio_nrpages(struct address_space *mapping)
+mapping_min_folio_nrpages(const struct address_space *mapping)
{
return 1UL << mapping_min_folio_order(mapping);
}
+static inline unsigned long
+mapping_min_folio_nrbytes(const struct address_space *mapping)
+{
+ return mapping_min_folio_nrpages(mapping) << PAGE_SHIFT;
+}
+
/**
* mapping_align_index() - Align index for this mapping.
* @mapping: The address_space.
@@ -489,7 +497,7 @@ mapping_min_folio_nrpages(struct address_space *mapping)
* new folio to the page cache and need to know what index to give it,
* call this function.
*/
-static inline pgoff_t mapping_align_index(struct address_space *mapping,
+static inline pgoff_t mapping_align_index(const struct address_space *mapping,
pgoff_t index)
{
return round_down(index, mapping_min_folio_nrpages(mapping));
@@ -499,7 +507,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
* Large folio support currently depends on THP. These dependencies are
* being worked on but are not yet fixed.
*/
-static inline bool mapping_large_folio_support(struct address_space *mapping)
+static inline bool mapping_large_folio_support(const struct address_space *mapping)
{
/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
@@ -514,7 +522,7 @@ static inline size_t mapping_max_folio_size(const struct address_space *mapping)
return PAGE_SIZE << mapping_max_folio_order(mapping);
}
-static inline int filemap_nr_thps(struct address_space *mapping)
+static inline int filemap_nr_thps(const struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
return atomic_read(&mapping->nr_thps);
@@ -543,7 +551,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
#endif
}
-struct address_space *folio_mapping(struct folio *);
+struct address_space *folio_mapping(const struct folio *folio);
/**
* folio_flush_mapping - Find the file mapping this folio belongs to.
@@ -928,7 +936,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
*
* Return: The index of the folio which follows this folio in the file.
*/
-static inline pgoff_t folio_next_index(struct folio *folio)
+static inline pgoff_t folio_next_index(const struct folio *folio)
{
return folio->index + folio_nr_pages(folio);
}
@@ -957,7 +965,7 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
* e.g., shmem did not move this folio to the swap cache.
* Return: true or false.
*/
-static inline bool folio_contains(struct folio *folio, pgoff_t index)
+static inline bool folio_contains(const struct folio *folio, pgoff_t index)
{
VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
return index - folio->index < folio_nr_pages(folio);
@@ -1034,13 +1042,13 @@ static inline loff_t page_offset(struct page *page)
/*
* Get the offset in PAGE_SIZE (even for hugetlb folios).
*/
-static inline pgoff_t folio_pgoff(struct folio *folio)
+static inline pgoff_t folio_pgoff(const struct folio *folio)
{
return folio->index;
}
-static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
- unsigned long address)
+static inline pgoff_t linear_page_index(const struct vm_area_struct *vma,
+ const unsigned long address)
{
pgoff_t pgoff;
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
@@ -1221,6 +1229,8 @@ void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
+void folio_end_writeback_no_dropbehind(struct folio *folio);
+void folio_end_dropbehind(struct folio *folio);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
@@ -1460,7 +1470,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
* readahead_pos - The byte offset into the file of this readahead request.
* @rac: The readahead request.
*/
-static inline loff_t readahead_pos(struct readahead_control *rac)
+static inline loff_t readahead_pos(const struct readahead_control *rac)
{
return (loff_t)rac->_index * PAGE_SIZE;
}
@@ -1469,7 +1479,7 @@ static inline loff_t readahead_pos(struct readahead_control *rac)
* readahead_length - The number of bytes in this readahead request.
* @rac: The readahead request.
*/
-static inline size_t readahead_length(struct readahead_control *rac)
+static inline size_t readahead_length(const struct readahead_control *rac)
{
return rac->_nr_pages * PAGE_SIZE;
}
@@ -1478,7 +1488,7 @@ static inline size_t readahead_length(struct readahead_control *rac)
* readahead_index - The index of the first page in this readahead request.
* @rac: The readahead request.
*/
-static inline pgoff_t readahead_index(struct readahead_control *rac)
+static inline pgoff_t readahead_index(const struct readahead_control *rac)
{
return rac->_index;
}
@@ -1487,7 +1497,7 @@ static inline pgoff_t readahead_index(struct readahead_control *rac)
* readahead_count - The number of pages in this readahead request.
* @rac: The readahead request.
*/
-static inline unsigned int readahead_count(struct readahead_control *rac)
+static inline unsigned int readahead_count(const struct readahead_control *rac)
{
return rac->_nr_pages;
}
@@ -1496,12 +1506,12 @@ static inline unsigned int readahead_count(struct readahead_control *rac)
* readahead_batch_length - The number of bytes in the current batch.
* @rac: The readahead request.
*/
-static inline size_t readahead_batch_length(struct readahead_control *rac)
+static inline size_t readahead_batch_length(const struct readahead_control *rac)
{
return rac->_batch_count * PAGE_SIZE;
}
-static inline unsigned long dir_pages(struct inode *inode)
+static inline unsigned long dir_pages(const struct inode *inode)
{
return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
PAGE_SHIFT;
@@ -1515,8 +1525,8 @@ static inline unsigned long dir_pages(struct inode *inode)
* Return: the number of bytes in the folio up to EOF,
* or -EFAULT if the folio was truncated.
*/
-static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
- struct inode *inode)
+static inline ssize_t folio_mkwrite_check_truncate(const struct folio *folio,
+ const struct inode *inode)
{
loff_t size = i_size_read(inode);
pgoff_t index = size >> PAGE_SHIFT;
@@ -1547,7 +1557,8 @@ static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
* Return: The number of filesystem blocks covered by this folio.
*/
static inline
-unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
+unsigned int i_blocks_per_folio(const struct inode *inode,
+ const struct folio *folio)
{
return folio_size(folio) >> inode->i_blkbits;
}
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 5d3a0cccc6bf..63be5a451627 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -51,12 +51,12 @@ static inline void folio_batch_reinit(struct folio_batch *fbatch)
fbatch->i = 0;
}
-static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_count(const struct folio_batch *fbatch)
{
return fbatch->nr;
}
-static inline unsigned int folio_batch_space(struct folio_batch *fbatch)
+static inline unsigned int folio_batch_space(const struct folio_batch *fbatch)
{
return PAGEVEC_SIZE - fbatch->nr;
}
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 7be742628c25..6f972a66c13e 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -43,6 +43,12 @@ void abort(void);
extern atomic_t panic_cpu;
#define PANIC_CPU_INVALID -1
+bool panic_try_start(void);
+void panic_reset(void);
+bool panic_in_progress(void);
+bool panic_on_this_cpu(void);
+bool panic_on_other_cpu(void);
+
/*
* Only to be used by arch init code. If the user over-wrote the default
* CONFIG_PANIC_TIMEOUT, honor it.
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 8a7f4f802c57..38a82d65e58e 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -107,7 +107,8 @@ static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
if (static_key_enabled(&mem_profiling_compressed)) {
pgalloc_tag_idx idx;
- idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
+ idx = (page->flags.f >> alloc_tag_ref_offs) &
+ alloc_tag_ref_mask;
idx_to_ref(idx, ref);
handle->page = page;
} else {
@@ -149,11 +150,11 @@ static inline void update_page_tag_ref(union pgtag_ref_handle handle, union code
idx = (unsigned long)ref_to_idx(ref);
idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs;
do {
- old_flags = READ_ONCE(page->flags);
+ old_flags = READ_ONCE(page->flags.f);
flags = old_flags;
flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs);
flags |= idx;
- } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
+ } while (unlikely(!try_cmpxchg(&page->flags.f, &old_flags, flags)));
} else {
if (WARN_ON(!handle.ref || !ref))
return;
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 25a7257052ff..32e8457ad535 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1975,6 +1975,32 @@ static inline bool arch_has_pfn_modify_check(void)
/* Page-Table Modification Mask */
typedef unsigned int pgtbl_mod_mask;
+enum pgtable_level {
+ PGTABLE_LEVEL_PTE = 0,
+ PGTABLE_LEVEL_PMD,
+ PGTABLE_LEVEL_PUD,
+ PGTABLE_LEVEL_P4D,
+ PGTABLE_LEVEL_PGD,
+};
+
+static inline const char *pgtable_level_to_str(enum pgtable_level level)
+{
+ switch (level) {
+ case PGTABLE_LEVEL_PTE:
+ return "pte";
+ case PGTABLE_LEVEL_PMD:
+ return "pmd";
+ case PGTABLE_LEVEL_PUD:
+ return "pud";
+ case PGTABLE_LEVEL_P4D:
+ return "p4d";
+ case PGTABLE_LEVEL_PGD:
+ return "pgd";
+ default:
+ return "unknown";
+ }
+}
+
#endif /* !__ASSEMBLY__ */
#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
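
The string helper above is presumably intended for diagnostics; a hypothetical
use (addr and the message are illustrative, not taken from this series):

	pr_warn("unexpected %s entry at %p\n",
		pgtable_level_to_str(PGTABLE_LEVEL_PMD), addr);
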
diff --git a/include/linux/phy.h b/include/linux/phy.h
index bb45787d8684..7a54a8b4d277 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -169,6 +169,11 @@ static inline bool phy_interface_empty(const unsigned long *intf)
return bitmap_empty(intf, PHY_INTERFACE_MODE_MAX);
}
+static inline void phy_interface_copy(unsigned long *d, const unsigned long *s)
+{
+ bitmap_copy(d, s, PHY_INTERFACE_MODE_MAX);
+}
+
static inline unsigned int phy_interface_weight(const unsigned long *intf)
{
return bitmap_weight(intf, PHY_INTERFACE_MODE_MAX);
@@ -1273,9 +1278,13 @@ struct phy_driver {
#define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)
-#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
-#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
-#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+#define PHY_ID_MATCH_EXACT_MASK	GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL_MASK GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR_MASK GENMASK(31, 10)
+
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_EXACT_MASK
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_MODEL_MASK
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = PHY_ID_MATCH_VENDOR_MASK
/**
* phy_id_compare - compare @id1 with @id2 taking account of @mask
@@ -1292,6 +1301,32 @@ static inline bool phy_id_compare(u32 id1, u32 id2, u32 mask)
}
/**
+ * phy_id_compare_vendor - compare @id against @vendor_mask
+ * @id: PHY ID
+ * @vendor_mask: PHY Vendor mask
+ *
+ * Return: true if the vendor bits of @id match @vendor_mask using the
+ * generic PHY Vendor mask.
+ */
+static inline bool phy_id_compare_vendor(u32 id, u32 vendor_mask)
+{
+ return phy_id_compare(id, vendor_mask, PHY_ID_MATCH_VENDOR_MASK);
+}
+
+/**
+ * phy_id_compare_model - compare @id against @model_mask
+ * @id: PHY ID
+ * @model_mask: PHY Model mask
+ *
+ * Return: true if the model bits of @id match @model_mask using the
+ * generic PHY Model mask.
+ */
+static inline bool phy_id_compare_model(u32 id, u32 model_mask)
+{
+ return phy_id_compare(id, model_mask, PHY_ID_MATCH_MODEL_MASK);
+}
+
+/**
* phydev_id_compare - compare @id with the PHY's Clause 22 ID
* @phydev: the PHY device
* @id: the PHY ID to be matched
@@ -1995,9 +2030,7 @@ static inline int phy_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
-void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
-int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_error(struct phy_device *phydev);
@@ -2099,16 +2132,6 @@ int __phy_hwtstamp_set(struct phy_device *phydev,
extern const struct bus_type mdio_bus_type;
extern const struct class mdio_bus_class;
-struct mdio_board_info {
- const char *bus_id;
- char modalias[MDIO_NAME_SIZE];
- int mdio_addr;
- const void *platform_data;
-};
-
-int mdiobus_register_board_info(const struct mdio_board_info *info,
- unsigned int n);
-
/**
* phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
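
A hedged sketch of the new vendor-match helper; the OUI bits below are a
made-up placeholder, not a real vendor ID:

#define FOO_PHY_VENDOR_BITS	0x00abc000	/* hypothetical, bits 31..10 */

static bool foo_phy_is_ours(struct phy_device *phydev)
{
	return phy_id_compare_vendor(phydev->phy_id, FOO_PHY_VENDOR_BITS);
}
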
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 5399b9e41e35..d17ff750c708 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -17,7 +17,7 @@ struct net_device;
#if IS_ENABLED(CONFIG_FIXED_PHY)
extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
-int fixed_phy_add(int phy_id, const struct fixed_phy_status *status);
+void fixed_phy_add(const struct fixed_phy_status *status);
struct phy_device *fixed_phy_register(const struct fixed_phy_status *status,
struct device_node *np);
@@ -26,11 +26,7 @@ extern int fixed_phy_set_link_update(struct phy_device *phydev,
int (*link_update)(struct net_device *,
struct fixed_phy_status *));
#else
-static inline int fixed_phy_add(int phy_id,
- const struct fixed_phy_status *status)
-{
- return -ENODEV;
-}
+static inline void fixed_phy_add(const struct fixed_phy_status *status) {}
static inline struct phy_device *
fixed_phy_register(const struct fixed_phy_status *status,
struct device_node *np)
@@ -41,16 +37,6 @@ fixed_phy_register(const struct fixed_phy_status *status,
static inline void fixed_phy_unregister(struct phy_device *phydev)
{
}
-static inline int fixed_phy_set_link_update(struct phy_device *phydev,
- int (*link_update)(struct net_device *,
- struct fixed_phy_status *))
-{
- return -ENODEV;
-}
-static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
-{
- return -EINVAL;
-}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 30659b615fca..9af0411761d7 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -320,9 +320,8 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* If in 802.3z mode, the link speed is fixed, dependent on the
* @state->interface. Duplex and pause modes are negotiated via
* the in-band configuration word. Advertised pause modes are set
- * according to the @state->an_enabled and @state->advertising
- * flags. Beware of MACs which only support full duplex at gigabit
- * and higher speeds.
+ * according to @state->advertising. Beware of MACs which only
+ * support full duplex at gigabit and higher speeds.
*
* If in Cisco SGMII mode, the link speed and duplex mode are passed
* in the serial bitstream 16-bit configuration word, and the MAC
@@ -331,7 +330,7 @@ int mac_prepare(struct phylink_config *config, unsigned int mode,
* responsible for reading the configuration word and configuring
* itself accordingly.
*
- * Valid state members: interface, an_enabled, pause, advertising.
+ * Valid state members: interface, pause, advertising.
*
* Implementations are expected to update the MAC to reflect the
* requested settings - i.o.w., if nothing has changed between two
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 1bcf071b860e..d9245ecec71d 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -88,9 +88,13 @@ struct pinctrl_map;
* passed in the argument on a custom form, else just use argument 1
* to indicate low power mode, argument 0 turns low power mode off.
* @PIN_CONFIG_MODE_PWM: this will configure the pin for PWM
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
- * value on the line. Use argument 1 to indicate high level, argument 0 to
- * indicate low level. (Please see Documentation/driver-api/pin-control.rst,
+ * @PIN_CONFIG_LEVEL: setting this will configure the pin as an output and
+ * drive a value on the line. Use argument 1 to indicate high level,
+ * argument 0 to indicate low level. Conversely, the value of the line
+ * can be read using this parameter, if and only if that value can be
+ * represented as a binary 0 or 1, where 0 indicates a low voltage level
+ * and 1 indicates a high voltage level.
+ * (Please see Documentation/driver-api/pin-control.rst,
* section "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
* without driving a value there. For most platforms this reduces to
@@ -137,7 +141,7 @@ enum pin_config_param {
PIN_CONFIG_INPUT_SCHMITT_UV,
PIN_CONFIG_MODE_LOW_POWER,
PIN_CONFIG_MODE_PWM,
- PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_LEVEL,
PIN_CONFIG_OUTPUT_ENABLE,
PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
PIN_CONFIG_PERSIST_STATE,
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index d138e1815645..1a8084e29405 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -11,6 +11,7 @@
#ifndef __LINUX_PINCTRL_PINCTRL_H
#define __LINUX_PINCTRL_PINCTRL_H
+#include <linux/bits.h>
#include <linux/types.h>
struct device;
@@ -206,16 +207,20 @@ extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
const char *pin_group, const unsigned int **pins,
unsigned int *num_pins);
+#define PINFUNCTION_FLAG_GPIO BIT(0)
+
/**
* struct pinfunction - Description about a function
* @name: Name of the function
* @groups: An array of groups for this function
* @ngroups: Number of groups in @groups
+ * @flags: Additional pin function flags
*/
struct pinfunction {
const char *name;
const char * const *groups;
size_t ngroups;
+ unsigned long flags;
};
/* Convenience macro to define a single named pinfunction */
@@ -226,6 +231,15 @@ struct pinfunction {
.ngroups = (_ngroups), \
}
+/* Same as PINCTRL_PINFUNCTION() but for the GPIO category of functions */
+#define PINCTRL_GPIO_PINFUNCTION(_name, _groups, _ngroups) \
+(struct pinfunction) { \
+ .name = (_name), \
+ .groups = (_groups), \
+ .ngroups = (_ngroups), \
+ .flags = PINFUNCTION_FLAG_GPIO, \
+ }
+
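For illustration, a pin controller driver could describe its GPIO function with the new initializer; the function and group names below are invented for the sketch.

static const char * const example_gpio_groups[] = {
	"gpio0_grp", "gpio1_grp",	/* hypothetical group names */
};

/* Marks the function as GPIO via PINFUNCTION_FLAG_GPIO. */
static const struct pinfunction example_gpio_func =
	PINCTRL_GPIO_PINFUNCTION("gpio", example_gpio_groups,
				 ARRAY_SIZE(example_gpio_groups));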
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PINCTRL)
extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
#else
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index d6f7b58d6ad0..6db6c3e1ccc2 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -66,6 +66,8 @@ struct pinmux_ops {
unsigned int selector,
const char * const **groups,
unsigned int *num_groups);
+ bool (*function_is_gpio) (struct pinctrl_dev *pctldev,
+ unsigned int selector);
int (*set_mux) (struct pinctrl_dev *pctldev, unsigned int func_selector,
unsigned int group_selector);
int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cf477beae4bb..789406d95e69 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -98,6 +98,25 @@ struct dev_pm_opp_data {
unsigned long u_volt;
};
+/**
+ * struct dev_pm_opp_key - Key used to identify OPP entries
+ * @freq: Frequency in Hz. Use 0 if frequency is not to be matched.
+ * @level: Performance level associated with the OPP entry.
+ * Use OPP_LEVEL_UNSET if level is not to be matched.
+ * @bw: Bandwidth associated with the OPP entry.
+ * Use 0 if bandwidth is not to be matched.
+ *
+ * This structure is used to uniquely identify an OPP entry based on
+ * frequency, performance level, and bandwidth. Each field can be
+ * selectively ignored during matching by setting it to its respective
+ * NOP value.
+ */
+struct dev_pm_opp_key {
+ unsigned long freq;
+ unsigned int level;
+ u32 bw;
+};
+
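As the kernel-doc above describes, a consumer can match on frequency alone by leaving the other key fields at their NOP values. A sketch, with an arbitrary 800 MHz target:

	struct dev_pm_opp_key key = {
		.freq = 800000000,		/* match 800 MHz ... */
		.level = OPP_LEVEL_UNSET,	/* ... ignore level ... */
		.bw = 0,			/* ... ignore bandwidth */
	};
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_key_exact(dev, &key, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	/* ... use the OPP, then drop the reference ... */
	dev_pm_opp_put(opp);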
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
@@ -131,6 +150,10 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
unsigned long freq,
bool available);
+struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available);
+
struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available);
@@ -289,6 +312,13 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_key_exact(struct device *dev,
+ struct dev_pm_opp_key *key,
+ bool available)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available)
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 8ca2235f78d5..299e2dd7da6d 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -90,4 +90,7 @@
/********** lib/stackdepot.c **********/
#define STACK_DEPOT_POISON ((void *)(0xD390 + POISON_POINTER_DELTA))
+/********** io_uring/ **********/
+#define IO_URING_PTR_POISON ((void *)(0x1091UL + POISON_POINTER_DELTA))
+
#endif
diff --git a/include/linux/power/max77705_charger.h b/include/linux/power/max77705_charger.h
index fdec9af9c541..6653abfdf747 100644
--- a/include/linux/power/max77705_charger.h
+++ b/include/linux/power/max77705_charger.h
@@ -9,35 +9,27 @@
#ifndef __MAX77705_CHARGER_H
#define __MAX77705_CHARGER_H __FILE__
+#include <linux/regmap.h>
+
/* MAX77705_CHG_REG_CHG_INT */
-#define MAX77705_BYP_I BIT(0)
-#define MAX77705_INP_LIMIT_I BIT(1)
-#define MAX77705_BATP_I BIT(2)
-#define MAX77705_BAT_I BIT(3)
-#define MAX77705_CHG_I BIT(4)
-#define MAX77705_WCIN_I BIT(5)
-#define MAX77705_CHGIN_I BIT(6)
-#define MAX77705_AICL_I BIT(7)
-
-/* MAX77705_CHG_REG_CHG_INT_MASK */
-#define MAX77705_BYP_IM BIT(0)
-#define MAX77705_INP_LIMIT_IM BIT(1)
-#define MAX77705_BATP_IM BIT(2)
-#define MAX77705_BAT_IM BIT(3)
-#define MAX77705_CHG_IM BIT(4)
-#define MAX77705_WCIN_IM BIT(5)
-#define MAX77705_CHGIN_IM BIT(6)
-#define MAX77705_AICL_IM BIT(7)
+#define MAX77705_BYP_I (0)
+#define MAX77705_INP_LIMIT_I (1)
+#define MAX77705_BATP_I (2)
+#define MAX77705_BAT_I (3)
+#define MAX77705_CHG_I (4)
+#define MAX77705_WCIN_I (5)
+#define MAX77705_CHGIN_I (6)
+#define MAX77705_AICL_I (7)
/* MAX77705_CHG_REG_CHG_INT_OK */
-#define MAX77705_BYP_OK BIT(0)
-#define MAX77705_DISQBAT_OK BIT(1)
-#define MAX77705_BATP_OK BIT(2)
-#define MAX77705_BAT_OK BIT(3)
-#define MAX77705_CHG_OK BIT(4)
-#define MAX77705_WCIN_OK BIT(5)
-#define MAX77705_CHGIN_OK BIT(6)
-#define MAX77705_AICL_OK BIT(7)
+#define MAX77705_BYP_OK BIT(MAX77705_BYP_I)
+#define MAX77705_DISQBAT_OK BIT(MAX77705_INP_LIMIT_I)
+#define MAX77705_BATP_OK BIT(MAX77705_BATP_I)
+#define MAX77705_BAT_OK BIT(MAX77705_BAT_I)
+#define MAX77705_CHG_OK BIT(MAX77705_CHG_I)
+#define MAX77705_WCIN_OK BIT(MAX77705_WCIN_I)
+#define MAX77705_CHGIN_OK BIT(MAX77705_CHGIN_I)
+#define MAX77705_AICL_OK BIT(MAX77705_AICL_I)
/* MAX77705_CHG_REG_DETAILS_00 */
#define MAX77705_BATP_DTLS BIT(0)
@@ -63,7 +55,6 @@
#define MAX77705_BUCK_SHIFT 2
#define MAX77705_BOOST_SHIFT 3
#define MAX77705_WDTEN_SHIFT 4
-#define MAX77705_MODE_MASK GENMASK(3, 0)
#define MAX77705_CHG_MASK BIT(MAX77705_CHG_SHIFT)
#define MAX77705_UNO_MASK BIT(MAX77705_UNO_SHIFT)
#define MAX77705_OTG_MASK BIT(MAX77705_OTG_SHIFT)
@@ -74,34 +65,19 @@
#define MAX77705_OTG_CTRL (MAX77705_OTG_MASK | MAX77705_BOOST_MASK)
/* MAX77705_CHG_REG_CNFG_01 */
-#define MAX77705_FCHGTIME_SHIFT 0
-#define MAX77705_FCHGTIME_MASK GENMASK(2, 0)
-#define MAX77705_CHG_RSTRT_SHIFT 4
-#define MAX77705_CHG_RSTRT_MASK GENMASK(5, 4)
#define MAX77705_FCHGTIME_DISABLE 0
#define MAX77705_CHG_RSTRT_DISABLE 0x3
-#define MAX77705_PQEN_SHIFT 7
-#define MAX77705_PQEN_MASK BIT(7)
#define MAX77705_CHG_PQEN_DISABLE 0
#define MAX77705_CHG_PQEN_ENABLE 1
/* MAX77705_CHG_REG_CNFG_02 */
-#define MAX77705_OTG_ILIM_SHIFT 6
-#define MAX77705_OTG_ILIM_MASK GENMASK(7, 6)
#define MAX77705_OTG_ILIM_500 0
#define MAX77705_OTG_ILIM_900 1
#define MAX77705_OTG_ILIM_1200 2
#define MAX77705_OTG_ILIM_1500 3
-#define MAX77705_CHG_CC GENMASK(5, 0)
/* MAX77705_CHG_REG_CNFG_03 */
-#define MAX77705_TO_ITH_SHIFT 0
-#define MAX77705_TO_ITH_MASK GENMASK(2, 0)
-#define MAX77705_TO_TIME_SHIFT 3
-#define MAX77705_TO_TIME_MASK GENMASK(5, 3)
-#define MAX77705_SYS_TRACK_DIS_SHIFT 7
-#define MAX77705_SYS_TRACK_DIS_MASK BIT(7)
#define MAX77705_TO_ITH_150MA 0
#define MAX77705_TO_TIME_30M 3
#define MAX77705_SYS_TRACK_ENABLE 0
@@ -110,15 +86,8 @@
/* MAX77705_CHG_REG_CNFG_04 */
#define MAX77705_CHG_MINVSYS_SHIFT 6
#define MAX77705_CHG_MINVSYS_MASK GENMASK(7, 6)
-#define MAX77705_CHG_PRM_SHIFT 0
-#define MAX77705_CHG_PRM_MASK GENMASK(5, 0)
-
-#define MAX77705_CHG_CV_PRM_SHIFT 0
-#define MAX77705_CHG_CV_PRM_MASK GENMASK(5, 0)
/* MAX77705_CHG_REG_CNFG_05 */
-#define MAX77705_REG_B2SOVRC_SHIFT 0
-#define MAX77705_REG_B2SOVRC_MASK GENMASK(3, 0)
#define MAX77705_B2SOVRC_DISABLE 0
#define MAX77705_B2SOVRC_4_5A 6
#define MAX77705_B2SOVRC_4_8A 8
@@ -128,9 +97,8 @@
#define MAX77705_WDTCLR_SHIFT 0
#define MAX77705_WDTCLR_MASK GENMASK(1, 0)
#define MAX77705_WDTCLR 1
-#define MAX77705_CHGPROT_MASK GENMASK(3, 2)
-#define MAX77705_CHGPROT_UNLOCKED GENMASK(3, 2)
-#define MAX77705_SLOWEST_LX_SLOPE GENMASK(6, 5)
+#define MAX77705_CHGPROT_UNLOCKED 3
+#define MAX77705_SLOWEST_LX_SLOPE 3
/* MAX77705_CHG_REG_CNFG_07 */
#define MAX77705_CHG_FMBST 4
@@ -140,36 +108,14 @@
#define MAX77705_REG_FGSRC_MASK BIT(MAX77705_REG_FGSRC_SHIFT)
/* MAX77705_CHG_REG_CNFG_08 */
-#define MAX77705_REG_FSW_SHIFT 0
-#define MAX77705_REG_FSW_MASK GENMASK(1, 0)
#define MAX77705_CHG_FSW_3MHz 0
#define MAX77705_CHG_FSW_2MHz 1
#define MAX77705_CHG_FSW_1_5MHz 2
/* MAX77705_CHG_REG_CNFG_09 */
-#define MAX77705_CHG_CHGIN_LIM_MASK GENMASK(6, 0)
-#define MAX77705_CHG_EN_MASK BIT(7)
#define MAX77705_CHG_DISABLE 0
-#define MAX77705_CHARGER_CHG_CHARGING(_reg) \
- (((_reg) & MAX77705_CHG_EN_MASK) > 1)
-
-
-/* MAX77705_CHG_REG_CNFG_10 */
-#define MAX77705_CHG_WCIN_LIM GENMASK(5, 0)
-
-/* MAX77705_CHG_REG_CNFG_11 */
-#define MAX77705_VBYPSET_SHIFT 0
-#define MAX77705_VBYPSET_MASK GENMASK(6, 0)
/* MAX77705_CHG_REG_CNFG_12 */
-#define MAX77705_CHGINSEL_SHIFT 5
-#define MAX77705_CHGINSEL_MASK BIT(MAX77705_CHGINSEL_SHIFT)
-#define MAX77705_WCINSEL_SHIFT 6
-#define MAX77705_WCINSEL_MASK BIT(MAX77705_WCINSEL_SHIFT)
-#define MAX77705_VCHGIN_REG_MASK GENMASK(4, 3)
-#define MAX77705_WCIN_REG_MASK GENMASK(2, 1)
-#define MAX77705_REG_DISKIP_SHIFT 0
-#define MAX77705_REG_DISKIP_MASK BIT(MAX77705_REG_DISKIP_SHIFT)
/* REG=4.5V, UVLO=4.7V */
#define MAX77705_VCHGIN_4_5 0
/* REG=4.5V, UVLO=4.7V */
@@ -183,9 +129,59 @@
#define MAX77705_CURRENT_CHGIN_MIN 100000
#define MAX77705_CURRENT_CHGIN_MAX 3200000
+enum max77705_field_idx {
+ MAX77705_CHGPROT,
+ MAX77705_CHG_EN,
+ MAX77705_CHG_CC_LIM,
+ MAX77705_CHG_CHGIN_LIM,
+ MAX77705_CHG_CV_PRM,
+ MAX77705_CHG_PQEN,
+ MAX77705_CHG_RSTRT,
+ MAX77705_CHG_WCIN,
+ MAX77705_FCHGTIME,
+ MAX77705_LX_SLOPE,
+ MAX77705_MODE,
+ MAX77705_OTG_ILIM,
+ MAX77705_REG_B2SOVRC,
+ MAX77705_REG_DISKIP,
+ MAX77705_REG_FSW,
+ MAX77705_SYS_TRACK,
+ MAX77705_TO,
+ MAX77705_TO_TIME,
+ MAX77705_VBYPSET,
+ MAX77705_VCHGIN,
+ MAX77705_WCIN,
+ MAX77705_N_REGMAP_FIELDS,
+};
+
+static const struct reg_field max77705_reg_field[MAX77705_N_REGMAP_FIELDS] = {
+ [MAX77705_MODE] = REG_FIELD(MAX77705_CHG_REG_CNFG_00, 0, 3),
+ [MAX77705_FCHGTIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 0, 2),
+ [MAX77705_CHG_RSTRT] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 4, 5),
+ [MAX77705_CHG_PQEN] = REG_FIELD(MAX77705_CHG_REG_CNFG_01, 7, 7),
+ [MAX77705_CHG_CC_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 0, 5),
+ [MAX77705_OTG_ILIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_02, 6, 7),
+ [MAX77705_TO] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 0, 2),
+ [MAX77705_TO_TIME] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 3, 5),
+ [MAX77705_SYS_TRACK] = REG_FIELD(MAX77705_CHG_REG_CNFG_03, 7, 7),
+ [MAX77705_CHG_CV_PRM] = REG_FIELD(MAX77705_CHG_REG_CNFG_04, 0, 5),
+ [MAX77705_REG_B2SOVRC] = REG_FIELD(MAX77705_CHG_REG_CNFG_05, 0, 3),
+ [MAX77705_CHGPROT] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 2, 3),
+ [MAX77705_LX_SLOPE] = REG_FIELD(MAX77705_CHG_REG_CNFG_06, 5, 6),
+ [MAX77705_REG_FSW] = REG_FIELD(MAX77705_CHG_REG_CNFG_08, 0, 1),
+ [MAX77705_CHG_CHGIN_LIM] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 0, 6),
+ [MAX77705_CHG_EN] = REG_FIELD(MAX77705_CHG_REG_CNFG_09, 7, 7),
+ [MAX77705_CHG_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_10, 0, 5),
+ [MAX77705_VBYPSET] = REG_FIELD(MAX77705_CHG_REG_CNFG_11, 0, 6),
+ [MAX77705_REG_DISKIP] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 0, 0),
+ [MAX77705_WCIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 1, 2),
+ [MAX77705_VCHGIN] = REG_FIELD(MAX77705_CHG_REG_CNFG_12, 3, 4),
+};
+
struct max77705_charger_data {
struct device *dev;
struct regmap *regmap;
+ struct regmap_field *rfield[MAX77705_N_REGMAP_FIELDS];
struct power_supply_battery_info *bat_info;
struct workqueue_struct *wqueue;
struct work_struct chgin_work;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index f21f806bfb38..360ffdf272da 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -176,6 +176,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
POWER_SUPPLY_PROP_MANUFACTURE_MONTH,
POWER_SUPPLY_PROP_MANUFACTURE_DAY,
+ POWER_SUPPLY_PROP_INTERNAL_RESISTANCE,
+ POWER_SUPPLY_PROP_STATE_OF_HEALTH,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 5d22b803f51e..45c663124c9b 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -330,8 +330,6 @@ static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
#endif
-bool this_cpu_in_panic(void);
-
#ifdef CONFIG_SMP
extern int __printk_cpu_sync_try_get(void);
extern void __printk_cpu_sync_wait(void);
diff --git a/include/linux/property.h b/include/linux/property.h
index d1e80b3c9918..50b26589dd70 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -176,6 +176,16 @@ struct fwnode_handle *fwnode_get_next_available_child_node(
for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\
child = fwnode_get_next_available_child_node(fwnode, child))
+#define fwnode_for_each_child_node_scoped(fwnode, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ fwnode_get_next_child_node(fwnode, NULL); \
+ child; child = fwnode_get_next_child_node(fwnode, child))
+
+#define fwnode_for_each_available_child_node_scoped(fwnode, child) \
+ for (struct fwnode_handle *child __free(fwnode_handle) = \
+ fwnode_get_next_available_child_node(fwnode, NULL); \
+ child; child = fwnode_get_next_available_child_node(fwnode, child))
+
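The scoped variants pair each iteration with __free(fwnode_handle), so an early return or break does not leak a child reference. A minimal sketch:

static unsigned int example_count_children(struct fwnode_handle *fwnode)
{
	unsigned int n = 0;

	/* child is released automatically on every exit path. */
	fwnode_for_each_available_child_node_scoped(fwnode, child)
		n++;

	return n;
}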
struct fwnode_handle *device_get_next_child_node(const struct device *dev,
struct fwnode_handle *child);
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 3d089bd4d5e9..884364596dd3 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -67,6 +67,8 @@ struct ptp_system_timestamp {
* @n_ext_ts: The number of external time stamp channels.
* @n_per_out: The number of programmable periodic signals.
* @n_pins: The number of programmable pins.
+ * @n_per_lp: The number of channels that support looping back the
+ * periodic output signal.
* @pps: Indicates whether the clock supports a PPS callback.
*
* @supported_perout_flags: The set of flags the driver supports for the
@@ -175,6 +177,11 @@ struct ptp_system_timestamp {
* scheduling time (>=0) or negative value in case further
* scheduling is not required.
*
+ * @perout_loopback: Request the driver to enable or disable the periodic output
+ * signal loopback.
+ * parameter index: index of the periodic output signal channel.
+ * parameter on: caller passes one to enable or zero to disable.
+ *
* Drivers should embed their ptp_clock_info within a private
* structure, obtaining a reference to it using container_of().
*
@@ -189,6 +196,7 @@ struct ptp_clock_info {
int n_ext_ts;
int n_per_out;
int n_pins;
+ int n_per_lp;
int pps;
unsigned int supported_perout_flags;
unsigned int supported_extts_flags;
@@ -213,6 +221,8 @@ struct ptp_clock_info {
int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
long (*do_aux_work)(struct ptp_clock_info *ptp);
+ int (*perout_loopback)(struct ptp_clock_info *ptp, unsigned int index,
+ int on);
};
struct ptp_clock;
@@ -361,6 +371,24 @@ extern void ptp_clock_event(struct ptp_clock *ptp,
extern int ptp_clock_index(struct ptp_clock *ptp);
/**
+ * ptp_clock_index_by_of_node() - obtain the device index of
+ * a PTP clock based on the PTP device of_node
+ *
+ * @np: The device of_node pointer of the PTP device.
+ * Return: The PHC index on success or -1 on failure.
+ */
+int ptp_clock_index_by_of_node(struct device_node *np);
+
+/**
+ * ptp_clock_index_by_dev() - obtain the device index of
+ * a PTP clock based on the PTP device.
+ *
+ * @parent: The parent device (PTP device) pointer of the PTP clock.
+ * Return: The PHC index on success or -1 on failure.
+ */
+int ptp_clock_index_by_dev(struct device *parent);
+
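A MAC driver whose PTP clock lives on a separate device could resolve the PHC index for ethtool with the new lookup; the lookup-by-parent semantics follow the kernel-doc above, while the surrounding driver structure and field names are invented for this sketch.

static int example_get_ts_info(struct net_device *ndev,
			       struct kernel_ethtool_ts_info *info)
{
	struct example_priv *priv = netdev_priv(ndev);	/* hypothetical */

	/* priv->ptp_pdev is the device the PTP clock was registered on. */
	info->phc_index = ptp_clock_index_by_dev(&priv->ptp_pdev->dev);
	return 0;
}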
+/**
* ptp_find_pin() - obtain the pin index of a given auxiliary function
*
* The caller must hold ptp_clock::pincfg_mux. Drivers do not have
@@ -425,6 +453,10 @@ static inline void ptp_clock_event(struct ptp_clock *ptp,
{ }
static inline int ptp_clock_index(struct ptp_clock *ptp)
{ return -1; }
+static inline int ptp_clock_index_by_of_node(struct device_node *np)
+{ return -1; }
+static inline int ptp_clock_index_by_dev(struct device *parent)
+{ return -1; }
static inline int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan)
{ return -1; }
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 551329220e4f..534531807d95 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -243,6 +243,24 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
return ret;
}
+/* Zero entries from tail to specified head.
+ * NB: if consumer_head can be >= r->size, the caller needs to fix up
+ * the tail afterwards.
+ */
+static inline void __ptr_ring_zero_tail(struct ptr_ring *r, int consumer_head)
+{
+ int head = consumer_head;
+
+ /* Zero out entries in the reverse order: this way the cache line
+ * that the producer might currently be reading is touched last;
+ * the producer won't make progress and touch other cache lines
+ * besides the first one until we write out all entries.
+ */
+ while (likely(head > r->consumer_tail))
+ r->queue[--head] = NULL;
+
+ r->consumer_tail = consumer_head;
+}
+
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
@@ -261,8 +279,7 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
* to work correctly.
*/
- int consumer_head = r->consumer_head;
- int head = consumer_head++;
+ int consumer_head = r->consumer_head + 1;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
@@ -270,16 +287,9 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
* but helps keep the implementation simple.
*/
if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
- consumer_head >= r->size)) {
- /* Zero out entries in the reverse order: this way we touch the
- * cache line that producer might currently be reading the last;
- * producer won't make progress and touch other cache lines
- * besides the first one until we write out all entries.
- */
- while (likely(head >= r->consumer_tail))
- r->queue[head--] = NULL;
- r->consumer_tail = consumer_head;
- }
+ consumer_head >= r->size))
+ __ptr_ring_zero_tail(r, consumer_head);
+
if (unlikely(consumer_head >= r->size)) {
consumer_head = 0;
r->consumer_tail = 0;
@@ -513,7 +523,6 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
void (*destroy)(void *))
{
unsigned long flags;
- int head;
spin_lock_irqsave(&r->consumer_lock, flags);
spin_lock(&r->producer_lock);
@@ -525,17 +534,14 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
* Clean out buffered entries (for simplicity). This way following code
* can test entries for NULL and if not assume they are valid.
*/
- head = r->consumer_head - 1;
- while (likely(head >= r->consumer_tail))
- r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ __ptr_ring_zero_tail(r, r->consumer_head);
/*
* Go over entries in batch, start moving head back and copy entries.
* Stop when we run into previously unconsumed entries.
*/
while (n) {
- head = r->consumer_head - 1;
+ int head = r->consumer_head - 1;
if (head < 0)
head = r->size - 1;
if (r->queue[head]) {
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 6cd020eea37a..daa92a58585d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -394,18 +394,8 @@ typedef int __bitwise rmap_t;
/* The anonymous (sub)page is exclusive to a single process. */
#define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0))
-/*
- * Internally, we're using an enum to specify the granularity. We make the
- * compiler emit specialized code for each granularity.
- */
-enum rmap_level {
- RMAP_LEVEL_PTE = 0,
- RMAP_LEVEL_PMD,
- RMAP_LEVEL_PUD,
-};
-
-static inline void __folio_rmap_sanity_checks(const struct folio *folio,
- const struct page *page, int nr_pages, enum rmap_level level)
+static __always_inline void __folio_rmap_sanity_checks(const struct folio *folio,
+ const struct page *page, int nr_pages, enum pgtable_level level)
{
/* hugetlb folios are handled separately. */
VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
@@ -427,18 +417,18 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
switch (level) {
- case RMAP_LEVEL_PTE:
+ case PGTABLE_LEVEL_PTE:
break;
- case RMAP_LEVEL_PMD:
+ case PGTABLE_LEVEL_PMD:
/*
* We don't support folios larger than a single PMD yet. So
- * when RMAP_LEVEL_PMD is set, we assume that we are creating
+ * when PGTABLE_LEVEL_PMD is set, we assume that we are creating
* a single "entire" mapping of the folio.
*/
VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio);
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio);
break;
- case RMAP_LEVEL_PUD:
+ case PGTABLE_LEVEL_PUD:
/*
* Assume that we are creating a single "entire" mapping of the
* folio.
@@ -447,7 +437,7 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio,
VM_WARN_ON_FOLIO(nr_pages != HPAGE_PUD_NR, folio);
break;
default:
- VM_WARN_ON_ONCE(true);
+ BUILD_BUG();
}
/*
@@ -567,14 +557,14 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
static __always_inline void __folio_dup_file_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
- enum rmap_level level)
+ enum pgtable_level level)
{
const int orig_nr_pages = nr_pages;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
- case RMAP_LEVEL_PTE:
+ case PGTABLE_LEVEL_PTE:
if (!folio_test_large(folio)) {
atomic_inc(&folio->_mapcount);
break;
@@ -587,11 +577,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
}
folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
- case RMAP_LEVEL_PMD:
- case RMAP_LEVEL_PUD:
+ case PGTABLE_LEVEL_PMD:
+ case PGTABLE_LEVEL_PUD:
atomic_inc(&folio->_entire_mapcount);
folio_inc_large_mapcount(folio, dst_vma);
break;
+ default:
+ BUILD_BUG();
}
}
@@ -609,13 +601,13 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
static inline void folio_dup_file_rmap_ptes(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma)
{
- __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, nr_pages, dst_vma, PGTABLE_LEVEL_PTE);
}
static __always_inline void folio_dup_file_rmap_pte(struct folio *folio,
struct page *page, struct vm_area_struct *dst_vma)
{
- __folio_dup_file_rmap(folio, page, 1, dst_vma, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, 1, dst_vma, PGTABLE_LEVEL_PTE);
}
/**
@@ -632,7 +624,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
struct page *page, struct vm_area_struct *dst_vma)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, RMAP_LEVEL_PTE);
+ __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, dst_vma, PGTABLE_LEVEL_PTE);
#else
WARN_ON_ONCE(true);
#endif
@@ -640,7 +632,7 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio,
static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
- struct vm_area_struct *src_vma, enum rmap_level level)
+ struct vm_area_struct *src_vma, enum pgtable_level level)
{
const int orig_nr_pages = nr_pages;
bool maybe_pinned;
@@ -665,7 +657,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
* copying if the folio maybe pinned.
*/
switch (level) {
- case RMAP_LEVEL_PTE:
+ case PGTABLE_LEVEL_PTE:
if (unlikely(maybe_pinned)) {
for (i = 0; i < nr_pages; i++)
if (PageAnonExclusive(page + i))
@@ -687,8 +679,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
} while (page++, --nr_pages > 0);
folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
- case RMAP_LEVEL_PMD:
- case RMAP_LEVEL_PUD:
+ case PGTABLE_LEVEL_PMD:
+ case PGTABLE_LEVEL_PUD:
if (PageAnonExclusive(page)) {
if (unlikely(maybe_pinned))
return -EBUSY;
@@ -697,6 +689,8 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
atomic_inc(&folio->_entire_mapcount);
folio_inc_large_mapcount(folio, dst_vma);
break;
+ default:
+ BUILD_BUG();
}
return 0;
}
@@ -730,7 +724,7 @@ static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio,
struct vm_area_struct *src_vma)
{
return __folio_try_dup_anon_rmap(folio, page, nr_pages, dst_vma,
- src_vma, RMAP_LEVEL_PTE);
+ src_vma, PGTABLE_LEVEL_PTE);
}
static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
@@ -738,7 +732,7 @@ static __always_inline int folio_try_dup_anon_rmap_pte(struct folio *folio,
struct vm_area_struct *src_vma)
{
return __folio_try_dup_anon_rmap(folio, page, 1, dst_vma, src_vma,
- RMAP_LEVEL_PTE);
+ PGTABLE_LEVEL_PTE);
}
/**
@@ -770,7 +764,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, dst_vma,
- src_vma, RMAP_LEVEL_PMD);
+ src_vma, PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
@@ -778,7 +772,7 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
}
static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
- struct page *page, int nr_pages, enum rmap_level level)
+ struct page *page, int nr_pages, enum pgtable_level level)
{
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
@@ -873,7 +867,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
struct page *page)
{
- return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE);
+ return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
}
/**
@@ -904,7 +898,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR,
- RMAP_LEVEL_PMD);
+ PGTABLE_LEVEL_PMD);
#else
WARN_ON_ONCE(true);
return -EBUSY;
@@ -928,6 +922,11 @@ struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr,
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION (1 << 1)
+/* Result flags */
+
+/* The page is mapped across page table boundary */
+#define PVMW_PGTABLE_CROSSED (1 << 16)
+
struct page_vma_mapped_walk {
unsigned long pfn;
unsigned long nr_pages;
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index fa9f1021541e..ede4c6bf6f22 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -44,6 +44,16 @@ static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
return READ_ONCE(lock->owner) != NULL;
}
+#ifdef CONFIG_RT_MUTEXES
+#define RT_MUTEX_HAS_WAITERS 1UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
+{
+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+ return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
+}
+#endif
extern void rt_mutex_base_init(struct rt_mutex_base *rtb);
/**
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 6f8a4965f9b9..29f6ceb98d74 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -158,6 +158,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
+ VM_WARN_ON_ONCE(!page_range_contiguous(page, ALIGN(len + offset, PAGE_SIZE) / PAGE_SIZE));
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
@@ -600,7 +601,7 @@ void __sg_page_iter_start(struct sg_page_iter *piter,
*/
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
- return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+ return sg_page(piter->sg) + piter->sg_pgoffset;
}
/**
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 6eb65ceed213..b7fafe999073 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -8,6 +8,20 @@
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
+static inline unsigned long __mm_flags_get_dumpable(struct mm_struct *mm)
+{
+ /*
+ * By convention, dumpable bits are contained in first 32 bits of the
+ * bitmap, so we can simply access this first unsigned long directly.
+ */
+ return __mm_flags_get_word(mm);
+}
+
+static inline void __mm_flags_set_mask_dumpable(struct mm_struct *mm, int value)
+{
+ __mm_flags_set_mask_bits_word(mm, MMF_DUMPABLE_MASK, value);
+}
+
extern void set_dumpable(struct mm_struct *mm, int value);
/*
* This returns the actual value of the suid_dumpable flag. For things
@@ -22,7 +36,9 @@ static inline int __get_dumpable(unsigned long mm_flags)
static inline int get_dumpable(struct mm_struct *mm)
{
- return __get_dumpable(mm->flags);
+ unsigned long flags = __mm_flags_get_dumpable(mm);
+
+ return __get_dumpable(flags);
}
#endif /* _LINUX_SCHED_COREDUMP_H */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 2201da0afecc..0232d983b715 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -178,7 +178,7 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
#endif
extern void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack);
+ const struct rlimit *rlim_stack);
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
@@ -211,7 +211,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long flags, vm_flags_t vm_flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
- struct rlimit *rlim_stack) {}
+ const struct rlimit *rlim_stack) {}
#endif
static inline bool in_vfork(struct task_struct *tsk)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 34d6a0e108c3..525aa2a632b2 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -210,9 +210,8 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* pins the final release of task.io_context. Also protects ->cpuset and
* ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
- * Nests both inside and outside of read_lock(&tasklist_lock).
- * It must not be nested with write_lock_irq(&tasklist_lock),
- * neither inside nor outside.
+ * Nests inside of read_lock(&tasklist_lock). It must not be nested with
+ * write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
{
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 688466a0e816..aafaac1496b0 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -153,7 +153,7 @@ struct scmi_perf_domain_info {
* for a given device
* @fast_switch_rate_limit: gets the minimum time (us) required between
* successive fast_switching requests
- * @power_scale_mw_get: indicates if the power values provided are in milliWatts
+ * @power_scale_get: indicates if the power values provided are in milliWatts
* or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 923d68e07679..1690706206e8 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -12,6 +12,7 @@
#define SCREEN_INFO_MAX_RESOURCES 3
struct pci_dev;
+struct pixel_format;
struct resource;
static inline bool __screen_info_has_lfb(unsigned int type)
@@ -136,6 +137,7 @@ static inline u32 __screen_info_vesapm_info_base(const struct screen_info *si)
ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);
u32 __screen_info_lfb_bits_per_pixel(const struct screen_info *si);
+int screen_info_pixel_format(const struct screen_info *si, struct pixel_format *f);
#if defined(CONFIG_PCI)
void screen_info_apply_fixups(void);
diff --git a/include/linux/security.h b/include/linux/security.h
index bd33f194c94a..92ac3f27b973 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -391,7 +391,7 @@ int security_dentry_init_security(struct dentry *dentry, int mode,
const char **xattr_name,
struct lsm_context *lsmcxt);
int security_dentry_create_files_as(struct dentry *dentry, int mode,
- struct qstr *name,
+ const struct qstr *name,
const struct cred *old,
struct cred *new);
int security_path_notify(const struct path *path, u64 mask,
@@ -872,7 +872,7 @@ static inline int security_dentry_init_security(struct dentry *dentry,
}
static inline int security_dentry_create_files_as(struct dentry *dentry,
- int mode, struct qstr *name,
+ int mode, const struct qstr *name,
const struct cred *old,
struct cred *new)
{
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 60c65cea74f6..5c71945a5e4d 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -522,6 +522,28 @@ struct ethtool_modinfo;
struct sfp_bus;
/**
+ * struct sfp_module_caps - sfp module capabilities
+ * @interfaces: bitmap of interfaces that the module may support
+ * @link_modes: bitmap of ethtool link modes that the module may support
+ */
+struct sfp_module_caps {
+ DECLARE_PHY_INTERFACE_MASK(interfaces);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(link_modes);
+ /**
+ * @may_have_phy: indicates whether the module may have an Ethernet
+ * PHY. There is no way to be sure that a module has a PHY, as the
+ * EEPROM doesn't contain this information. When set, this does not
+ * mean that the module definitely has a PHY.
+ */
+ bool may_have_phy;
+ /**
+ * @port: one of ethtool %PORT_* definitions, parsed from the module
+ * EEPROM, or %PORT_OTHER if the port type is not known.
+ */
+ u8 port;
+};
+
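With the per-call parse helpers folded into a single capabilities structure, an upstream consumer (e.g. phylink) might query it as follows. A sketch only, assuming the returned pointer stays valid while the module is inserted; the surrounding callback is hypothetical.

static void example_sfp_module_insert(struct sfp_bus *bus)
{
	const struct sfp_module_caps *caps = sfp_get_module_caps(bus);

	if (!caps)
		return;		/* CONFIG_SFP=n stub returns NULL */

	if (caps->may_have_phy)
		pr_info("module may carry an accessible PHY\n");

	pr_info("module port type: %u\n", caps->port);
}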
+/**
* struct sfp_upstream_ops - upstream operations structure
* @attach: called when the sfp socket driver is bound to the upstream
* (mandatory).
@@ -554,11 +576,7 @@ struct sfp_upstream_ops {
};
#if IS_ENABLED(CONFIG_SFP)
-int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support);
-bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
-void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
- unsigned long *support, unsigned long *interfaces);
+const struct sfp_module_caps *sfp_get_module_caps(struct sfp_bus *bus);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
const unsigned long *link_modes);
@@ -578,24 +596,10 @@ int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
void sfp_bus_del_upstream(struct sfp_bus *bus);
const char *sfp_get_name(struct sfp_bus *bus);
#else
-static inline int sfp_parse_port(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
- unsigned long *support)
-{
- return PORT_OTHER;
-}
-
-static inline bool sfp_may_have_phy(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id)
-{
- return false;
-}
-
-static inline void sfp_parse_support(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
- unsigned long *support,
- unsigned long *interfaces)
+static inline const struct sfp_module_caps *
+sfp_get_module_caps(struct sfp_bus *bus)
{
+ return NULL;
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 6d0f9c599ff7..0e47465ef0fd 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -99,9 +99,9 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
-bool shmem_mapping(struct address_space *mapping);
+bool shmem_mapping(const struct address_space *mapping);
#else
-static inline bool shmem_mapping(struct address_space *mapping)
+static inline bool shmem_mapping(const struct address_space *mapping)
{
return false;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fa633657e4c0..fb3fec9affaa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -674,7 +674,7 @@ enum {
/* This indicates the tcp segment has CWR set. */
SKB_GSO_TCP_ECN = 1 << 2,
- SKB_GSO_TCP_FIXEDID = 1 << 3,
+ __SKB_GSO_TCP_FIXEDID = 1 << 3,
SKB_GSO_TCPV6 = 1 << 4,
@@ -707,6 +707,12 @@ enum {
SKB_GSO_FRAGLIST = 1 << 18,
SKB_GSO_TCP_ACCECN = 1 << 19,
+
+ /* These indirectly map onto the same netdev feature.
+ * If NETIF_F_TSO_MANGLEID is set it may mangle both inner and outer IDs.
+ */
+ SKB_GSO_TCP_FIXEDID = 1 << 30,
+ SKB_GSO_TCP_FIXEDID_INNER = 1 << 31,
};
#if BITS_PER_LONG > 32
@@ -1159,6 +1165,45 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
+static inline void skb_dst_check_unset(struct sk_buff *skb)
+{
+ DEBUG_NET_WARN_ON_ONCE((skb->_skb_refdst & SKB_DST_PTRMASK) &&
+ !(skb->_skb_refdst & SKB_DST_NOREF));
+}
+
+/**
+ * skb_dstref_steal() - return current dst_entry value and clear it
+ * @skb: buffer
+ *
+ * Resets skb dst_entry without adjusting its reference count. Useful in
+ * cases where dst_entry needs to be temporarily reset and restored.
+ * Note that the returned value cannot be used directly because it
+ * might contain SKB_DST_NOREF bit.
+ *
+ * When in doubt, prefer skb_dst_drop() over skb_dstref_steal() to correctly
+ * handle dst_entry reference counting.
+ *
+ * Returns: original skb dst_entry.
+ */
+static inline unsigned long skb_dstref_steal(struct sk_buff *skb)
+{
+ unsigned long refdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0;
+ return refdst;
+}
+
+/**
+ * skb_dstref_restore() - restore skb dst_entry removed via skb_dstref_steal()
+ * @skb: buffer
+ * @refdst: dst entry from a call to skb_dstref_steal()
+ */
+static inline void skb_dstref_restore(struct sk_buff *skb, unsigned long refdst)
+{
+ skb_dst_check_unset(skb);
+ skb->_skb_refdst = refdst;
+}
+
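The steal/restore pair is meant for transient resets, e.g. around a nested operation that must not see the current dst; a minimal sketch:

static void example_with_dst_cleared(struct sk_buff *skb)
{
	/* Take the raw refdst value (may include SKB_DST_NOREF). */
	unsigned long refdst = skb_dstref_steal(skb);

	/* ... run code that requires skb->_skb_refdst == 0 ... */

	/* Put the original value back without touching refcounts. */
	skb_dstref_restore(skb, refdst);
}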
/**
* skb_dst_set - sets skb dst
* @skb: buffer
@@ -1169,6 +1214,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
*/
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb_dst_check_unset(skb);
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst;
}
@@ -1185,6 +1231,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
*/
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
+ skb_dst_check_unset(skb);
WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
skb->slow_gro |= !!dst;
skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
@@ -4861,6 +4908,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_MCTP_FLOWS)
SKB_EXT_MCTP,
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ SKB_EXT_PSP,
+#endif
SKB_EXT_NUM, /* must be last */
};
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 0b9095a281b8..49847888c287 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -315,7 +315,7 @@ static inline bool sk_psock_test_state(const struct sk_psock *psock,
static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
kfree_skb(skb);
}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index d5a8ab98035c..cf443f064a66 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -335,6 +335,37 @@ struct kmem_cache_args {
* %NULL means no constructor.
*/
void (*ctor)(void *);
+ /**
+ * @sheaf_capacity: Enable sheaves of given capacity for the cache.
+ *
+ * With a non-zero value, allocations from the cache go through caching
+ * arrays called sheaves. Each cpu has a main sheaf that's always
+ * present, and a spare sheaf that may not be present. When both become
+ * empty, there's an attempt to replace an empty sheaf with a full sheaf
+ * from the per-node barn.
+ *
+ * When no full sheaf is available, and gfp flags allow blocking, a
+ * sheaf is allocated and filled from slab(s) using bulk allocation.
+ * Otherwise the allocation falls back to the normal operation
+ * allocating a single object from a slab.
+ *
+ * Analogously, when freeing while both percpu sheaves are full, the
+ * barn may replace one of them with an empty sheaf, unless it's over
+ * capacity. In that case a sheaf is bulk freed to slab pages.
+ *
+ * The sheaves do not enforce NUMA placement of objects, so allocations
+ * via kmem_cache_alloc_node() with a node specified other than
+ * NUMA_NO_NODE will bypass them.
+ *
+ * Bulk allocation and free operations also try to use the cpu sheaves
+ * and barn, but fall back to using slab pages directly.
+ *
+ * When slub_debug is enabled for the cache, the sheaf_capacity argument
+ * is ignored.
+ *
+ * %0 means no sheaves will be created.
+ */
+ unsigned int sheaf_capacity;
};
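A cache opting in to sheaves would pass the new field through kmem_cache_args; in this sketch the object type and the capacity of 32 are arbitrary example values.

struct example_obj { u64 a, b; };	/* hypothetical object type */

static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	struct kmem_cache_args args = {
		.sheaf_capacity = 32,	/* arbitrary example capacity */
	};

	example_cache = kmem_cache_create("example_objs",
					  sizeof(struct example_obj),
					  &args, 0);
	return example_cache ? 0 : -ENOMEM;
}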
struct kmem_cache *__kmem_cache_create_args(const char *name,
@@ -465,11 +496,16 @@ int kmem_cache_shrink(struct kmem_cache *s);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check krealloc_noprof(const void *objp, size_t new_size,
- gfp_t flags) __realloc_size(2);
-#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__))
+void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
+ unsigned long align,
+ gfp_t flags, int nid) __realloc_size(2);
+#define krealloc_noprof(_o, _s, _f) krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
+#define krealloc_node_align(...) alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
+#define krealloc_node(_o, _s, _f, _n) krealloc_node_align(_o, _s, 1, _f, _n)
+#define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
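The new helpers keep krealloc() source-compatible while allowing a NUMA node hint; growing a per-node buffer might look like this sketch.

/* Grow buf to new_size bytes, preferring memory on node nid. */
static void *example_grow(void *buf, size_t new_size, int nid)
{
	return krealloc_node(buf, new_size, GFP_KERNEL, nid);
}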
void kfree(const void *objp);
+void kfree_nolock(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
@@ -798,6 +834,22 @@ void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
+struct slab_sheaf *
+kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
+
+int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf **sheafp, unsigned int size);
+
+void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
+ struct slab_sheaf *sheaf);
+
+void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
+ struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_from_sheaf(...) \
+ alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
+
+unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
+
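Per the declarations above, a user can prefill a sheaf in a sleepable context and later allocate from it where blocking is not allowed. This sketch infers the calling convention from the names and signatures only; the ERR_PTR error convention is an assumption.

	struct slab_sheaf *sheaf;
	void *obj;

	/* Sleepable context: reserve 16 objects up front. */
	sheaf = kmem_cache_prefill_sheaf(cache, GFP_KERNEL, 16);
	if (IS_ERR(sheaf))			/* assumed error convention */
		return PTR_ERR(sheaf);

	/* Later, e.g. atomic context: draw from the reserved objects. */
	obj = kmem_cache_alloc_from_sheaf(cache, GFP_ATOMIC, sheaf);

	/* Hand back whatever remains unused. */
	kmem_cache_return_sheaf(cache, GFP_KERNEL, sheaf);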
/*
* These macros allow declaring a kmem_buckets * parameter alongside size, which
* can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
@@ -910,6 +962,9 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
}
#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
+void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
+#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
+
#define kmem_buckets_alloc(_b, _size, _flags) \
alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
@@ -1041,18 +1096,20 @@ static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
-void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
-#define kvmalloc_node_noprof(size, flags, node) \
- __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
-#define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
-
-#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE)
-#define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
+ gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node_align_noprof(_size, _align, _flags, _node) \
+ __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
+#define kvmalloc_node_align(...) \
+ alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
+#define kvmalloc_node(_s, _f, _n) kvmalloc_node_align(_s, 1, _f, _n)
+#define kvmalloc(...) kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO)
#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
+
#define kmem_buckets_valloc(_b, _size, _flags) \
- alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
+ alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
@@ -1062,7 +1119,7 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
- return kvmalloc_node_noprof(bytes, flags, node);
+ return kvmalloc_node_align_noprof(bytes, 1, flags, node);
}
#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
@@ -1073,9 +1130,12 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
-void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
- __realloc_size(2);
-#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
+void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
+ gfp_t flags, int nid) __realloc_size(2);
+#define kvrealloc_node_align(...) \
+ alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
+#define kvrealloc_node(_p, _s, _f, _n) kvrealloc_node_align(_p, _s, 1, _f, _n)
+#define kvrealloc(...) kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)
extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
diff --git a/include/linux/soc/airoha/airoha_offload.h b/include/linux/soc/airoha/airoha_offload.h
new file mode 100644
index 000000000000..6f66eb339b3f
--- /dev/null
+++ b/include/linux/soc/airoha/airoha_offload.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#ifndef AIROHA_OFFLOAD_H
+#define AIROHA_OFFLOAD_H
+
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+enum {
+ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+};
+
+struct airoha_ppe_dev {
+ struct {
+ int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
+ void *type_data);
+ void (*check_skb)(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb, u16 hash,
+ bool rx_wlan);
+ } ops;
+
+ void *priv;
+};
+
+#if (IS_BUILTIN(CONFIG_NET_AIROHA) || IS_MODULE(CONFIG_NET_AIROHA))
+struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev);
+void airoha_ppe_put_dev(struct airoha_ppe_dev *dev);
+
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+ void *type_data)
+{
+ return dev->ops.setup_tc_block_cb(dev, type_data);
+}
+
+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb,
+ u16 hash, bool rx_wlan)
+{
+ dev->ops.check_skb(dev, skb, hash, rx_wlan);
+}
+#else
+static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
+{
+}
+
+static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
+ void *type_data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
+ struct sk_buff *skb, u16 hash,
+ bool rx_wlan)
+{
+}
+#endif
+
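A consumer of the PPE would typically resolve the device once and go through the ops wrappers; the reference/lifetime rules here are assumptions read off the get/put naming, and the function itself is hypothetical.

static int example_bind_ppe(struct device *dev, void *flow_type_data)
{
	struct airoha_ppe_dev *ppe = airoha_ppe_get_dev(dev);
	int err;

	if (!ppe)
		return -ENODEV;	/* stub path when CONFIG_NET_AIROHA=n */

	err = airoha_ppe_dev_setup_tc_block_cb(ppe, flow_type_data);

	airoha_ppe_put_dev(ppe);
	return err;
}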
+#define NPU_NUM_CORES 8
+#define NPU_NUM_IRQ 6
+#define NPU_RX0_DESC_NUM 512
+#define NPU_RX1_DESC_NUM 512
+
+/* CTRL */
+#define NPU_RX_DMA_DESC_LAST_MASK BIT(29)
+#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(28, 15)
+#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(14, 1)
+#define NPU_RX_DMA_DESC_DONE_MASK BIT(0)
+/* INFO */
+#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 28)
+#define NPU_RX_DMA_PKT_ID_MASK GENMASK(28, 26)
+#define NPU_RX_DMA_SRC_PORT_MASK GENMASK(25, 21)
+#define NPU_RX_DMA_CRSN_MASK GENMASK(20, 16)
+#define NPU_RX_DMA_FOE_ID_MASK GENMASK(15, 0)
+/* DATA */
+#define NPU_RX_DMA_SID_MASK GENMASK(31, 16)
+#define NPU_RX_DMA_FRAG_TYPE_MASK GENMASK(15, 14)
+#define NPU_RX_DMA_PRIORITY_MASK GENMASK(13, 10)
+#define NPU_RX_DMA_RADIO_ID_MASK GENMASK(9, 6)
+#define NPU_RX_DMA_VAP_ID_MASK GENMASK(5, 2)
+#define NPU_RX_DMA_FRAME_TYPE_MASK GENMASK(1, 0)
+
+struct airoha_npu_rx_dma_desc {
+ u32 ctrl;
+ u32 info;
+ u32 data;
+ u32 addr;
+ u64 rsv;
+} __packed;
+
+/* CTRL */
+#define NPU_TX_DMA_DESC_SCHED_MASK BIT(31)
+#define NPU_TX_DMA_DESC_LEN_MASK GENMASK(30, 18)
+#define NPU_TX_DMA_DESC_VEND_LEN_MASK GENMASK(17, 1)
+#define NPU_TX_DMA_DESC_DONE_MASK BIT(0)
+
+#define NPU_TXWI_LEN 192
+
+struct airoha_npu_tx_dma_desc {
+ u32 ctrl;
+ u32 addr;
+ u64 rsv;
+ u8 txwi[NPU_TXWI_LEN];
+} __packed;
+
+enum airoha_npu_wlan_set_cmd {
+ WLAN_FUNC_SET_WAIT_PCIE_ADDR,
+ WLAN_FUNC_SET_WAIT_DESC,
+ WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
+ WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
+ WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
+ WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
+ WLAN_FUNC_SET_WAIT_DEL_STA,
+ WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
+ WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
+ WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
+ WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
+ WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
+ WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
+ WLAN_FUNC_SET_WAIT_PCIE_STATE,
+ WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
+ WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
+ WLAN_FUNC_SET_WAIT_BAR_INFO,
+ WLAN_FUNC_SET_WAIT_FAST_FLAG,
+ WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
+ WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
+ WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
+ WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
+ WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
+ WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
+ WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
+ WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
+ WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
+ WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
+ WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
+ WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
+ WLAN_FUNC_SET_WAIT_HWNAT_INIT,
+ WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
+ WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
+ WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
+};
+
+enum airoha_npu_wlan_get_cmd {
+ WLAN_FUNC_GET_WAIT_NPU_INFO,
+ WLAN_FUNC_GET_WAIT_LAST_RATE,
+ WLAN_FUNC_GET_WAIT_COUNTER,
+ WLAN_FUNC_GET_WAIT_DBG_COUNTER,
+ WLAN_FUNC_GET_WAIT_RXDESC_BASE,
+ WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
+ WLAN_FUNC_GET_WAIT_DMA_ADDR,
+ WLAN_FUNC_GET_WAIT_RING_SIZE,
+ WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
+ WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
+ WLAN_FUNC_GET_WAIT_NPU_VERSION,
+};
+
+struct airoha_npu {
+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct airoha_npu_core {
+ struct airoha_npu *npu;
+ /* protect concurrent npu memory accesses */
+ spinlock_t lock;
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
+ int irqs[NPU_NUM_IRQ];
+
+ struct airoha_foe_stats __iomem *stats;
+
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+ int (*ppe_init_stats)(struct airoha_npu *npu,
+ dma_addr_t addr, u32 num_stats_entries);
+ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries);
+ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
+ int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_set_cmd func_id,
+ void *data, int data_len, gfp_t gfp);
+ int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd func_id,
+ void *data, int data_len, gfp_t gfp);
+ u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
+ bool xmit);
+ void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
+ u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
+ void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
+ void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
+ } ops;
+#endif
+};
+
+#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
+struct airoha_npu *airoha_npu_get(struct device *dev);
+void airoha_npu_put(struct airoha_npu *npu);
+
+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
+{
+ return npu->ops.wlan_init_reserved_memory(npu);
+}
+
+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
+ int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return npu->ops.wlan_send_msg(npu, ifindex, cmd, data, data_len, gfp);
+}
+
+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return npu->ops.wlan_get_msg(npu, ifindex, cmd, data, data_len, gfp);
+}
+
+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
+ int qid, bool xmit)
+{
+ return npu->ops.wlan_get_queue_addr(npu, qid, xmit);
+}
+
+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
+ u32 val)
+{
+ npu->ops.wlan_set_irq_status(npu, val);
+}
+
+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu, int q)
+{
+ return npu->ops.wlan_get_irq_status(npu, q);
+}
+
+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+ npu->ops.wlan_enable_irq(npu, q);
+}
+
+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
+{
+ npu->ops.wlan_disable_irq(npu, q);
+}
+#else
+static inline struct airoha_npu *airoha_npu_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void airoha_npu_put(struct airoha_npu *npu)
+{
+}
+
+static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
+ int ifindex,
+ enum airoha_npu_wlan_set_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
+ enum airoha_npu_wlan_get_cmd cmd,
+ void *data, int data_len, gfp_t gfp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
+ int qid, bool xmit)
+{
+ return 0;
+}
+
+static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
+ u32 val)
+{
+}
+
+static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu,
+ int q)
+{
+ return 0;
+}
+
+static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
+{
+}
+
+static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
+{
+}
+#endif
+
+#endif /* AIROHA_OFFLOAD_H */
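A minimal usage sketch for the accessors above (hypothetical caller, not part of this patch; the ifindex and command choice are illustrative):

	static int demo_npu_hwnat_setup(struct device *dev, void *cfg, int cfg_len)
	{
		struct airoha_npu *npu = airoha_npu_get(dev);
		int err;

		if (IS_ERR_OR_NULL(npu))
			return npu ? PTR_ERR(npu) : -ENODEV;

		/* Dispatches through npu->ops; with CONFIG_NET_AIROHA_NPU
		 * disabled the stub wrappers above return -EOPNOTSUPP, so
		 * callers need no #ifdefs of their own.
		 */
		err = airoha_npu_wlan_send_msg(npu, 0, WLAN_FUNC_SET_WAIT_HWNAT_INIT,
					       cfg, cfg_len, GFP_KERNEL);
		airoha_npu_put(npu);
		return err;
	}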
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index d8949a4ed0dc..c4ff6bab176d 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -147,7 +147,7 @@ struct mtk_wed_device {
u32 wpdma_tx;
u32 wpdma_txfree;
u32 wpdma_rx_glo;
- u32 wpdma_rx;
+ u32 wpdma_rx[MTK_WED_RX_QUEUES];
u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
u32 wpdma_rx_pg;
diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h
index 2996a3c28ef3..0a984e2579fe 100644
--- a/include/linux/soc/qcom/geni-se.h
+++ b/include/linux/soc/qcom/geni-se.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#ifndef _LINUX_QCOM_GENI_SE
@@ -36,6 +37,7 @@ enum geni_se_protocol_type {
GENI_SE_I2C,
GENI_SE_I3C,
GENI_SE_SPI_SLAVE,
+ GENI_SE_INVALID_PROTO = 255,
};
struct geni_wrapper;
@@ -531,5 +533,7 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
int geni_icc_enable(struct geni_se *se);
int geni_icc_disable(struct geni_se *se);
+
+int geni_load_se_firmware(struct geni_se *se, enum geni_se_protocol_type protocol);
#endif
#endif
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 9e8e60421192..8ea8230579a2 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -24,7 +24,7 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
phys_addr_t *reloc_base);
int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
- const char *fw_name, int pas_id, void *mem_region,
+ const char *fw_name, void *mem_region,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len,
@@ -54,9 +54,8 @@ static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw,
static inline int qcom_mdt_load_no_init(struct device *dev,
const struct firmware *fw,
- const char *fw_name, int pas_id,
- void *mem_region, phys_addr_t mem_phys,
- size_t mem_size,
+ const char *fw_name, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base)
{
return -ENODEV;
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 0832776262ac..e6a3476bcef1 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -19,6 +19,7 @@
struct dentry;
struct fwnode_handle;
+struct device_node;
struct sdw_bus;
struct sdw_slave;
@@ -1086,6 +1087,10 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
int sdw_stream_remove_slave(struct sdw_slave *slave,
struct sdw_stream_runtime *stream);
+struct device *of_sdw_find_device_by_node(struct device_node *np);
+
+int sdw_slave_get_current_bank(struct sdw_slave *sdev);
+
int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base);
/* messaging and data APIs */
@@ -1119,6 +1124,18 @@ static inline int sdw_stream_remove_slave(struct sdw_slave *slave,
return -EINVAL;
}
+static inline struct device *of_sdw_find_device_by_node(struct device_node *np)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return NULL;
+}
+
+static inline int sdw_slave_get_current_bank(struct sdw_slave *sdev)
+{
+ WARN_ONCE(1, "SoundWire API is disabled");
+ return -EINVAL;
+}
+
/* messaging and data APIs */
static inline int sdw_read(struct sdw_slave *slave, u32 addr)
{
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 26ddf95d23f9..fa1318bac06c 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -190,18 +190,27 @@ struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
/* MAC ----- optional PCS ----- SerDes ----- optional PHY ----- Media
- * ^ ^
- * mac_interface phy_interface
+ * ^
+ * phy_interface
*
- * mac_interface is the MAC-side interface, which may be the same
- * as phy_interface if there is no intervening PCS. If there is a
- * PCS, then mac_interface describes the interface mode between the
- * MAC and PCS, and phy_interface describes the interface mode
- * between the PCS and PHY.
- */
- phy_interface_t mac_interface;
- /* phy_interface is the PHY-side interface - the interface used by
- * an attached PHY.
+ * The Synopsys dwmac core only covers the MAC and an optional
+ * integrated PCS. Where the integrated PCS is used with a SerDes,
+ * e.g. for 1000base-X or Cisco SGMII, the connection between the
+ * PCS and SerDes will be TBI.
+ *
+ * Where the Synopsys dwmac core has been instantiated with multiple
+ * interface modes, these are selected via core-external configuration
+ * which is sampled when the dwmac core is reset. How this is done is
+ * platform glue specific, but this defines the interface used from
+ * the Synopsys dwmac core to the rest of the SoC.
+ *
+ * Where PCS other than the optional integrated Synopsys dwmac PCS
+ * is used, this counts as "the rest of the SoC" in the above
+ * paragraph.
+ *
+ * phy_interface is the PHY-side interface - the interface used by
+ * an attached PHY or SFP etc. This is equivalent to the interface
+ * that phylink uses.
*/
phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -238,7 +247,7 @@ struct plat_stmmacenet_data {
int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i,
phy_interface_t interface, int speed);
void (*fix_mac_speed)(void *priv, int speed, unsigned int mode);
- int (*fix_soc_reset)(void *priv, void __iomem *ioaddr);
+ int (*fix_soc_reset)(struct stmmac_priv *priv, void __iomem *ioaddr);
int (*serdes_powerup)(struct net_device *ndev, void *priv);
void (*serdes_powerdown)(struct net_device *ndev, void *priv);
int (*mac_finish)(struct net_device *ndev,
@@ -248,6 +257,8 @@ struct plat_stmmacenet_data {
void (*ptp_clk_freq_config)(struct stmmac_priv *priv);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
+ int (*suspend)(struct device *dev, void *priv);
+ int (*resume)(struct device *dev, void *priv);
struct mac_device_info *(*setup)(void *priv);
int (*clks_config)(void *priv, bool enabled);
int (*crosststamp)(ktime_t *device, struct system_counterval_t *system,
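A sketch of how platform glue might wire the new optional suspend/resume hooks (names are hypothetical; the core is assumed to invoke them around its common PM paths):

	static int demo_glue_suspend(struct device *dev, void *priv)
	{
		/* quiesce SoC-specific logic, e.g. gate a glue-level clock */
		return 0;
	}

	static int demo_glue_resume(struct device *dev, void *priv)
	{
		/* undo whatever demo_glue_suspend() did */
		return 0;
	}

	static void demo_glue_fill_plat(struct plat_stmmacenet_data *plat)
	{
		plat->suspend = demo_glue_suspend;
		plat->resume = demo_glue_resume;
	}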
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index f6aeed07fe04..891f6173c951 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -23,43 +23,30 @@ extern unsigned int nlm_debug;
#define dprintk(fmt, ...) \
dfprintk(FACILITY, fmt, ##__VA_ARGS__)
-#define dprintk_cont(fmt, ...) \
- dfprintk_cont(FACILITY, fmt, ##__VA_ARGS__)
#define dprintk_rcu(fmt, ...) \
dfprintk_rcu(FACILITY, fmt, ##__VA_ARGS__)
-#define dprintk_rcu_cont(fmt, ...) \
- dfprintk_rcu_cont(FACILITY, fmt, ##__VA_ARGS__)
#undef ifdebug
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
-# define dfprintk(fac, fmt, ...) \
-do { \
- ifdebug(fac) \
- printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \
-} while (0)
+# if IS_ENABLED(CONFIG_SUNRPC_DEBUG_TRACE)
+# define __sunrpc_printk(fmt, ...) trace_printk(fmt, ##__VA_ARGS__)
+# else
+# define __sunrpc_printk(fmt, ...) printk(KERN_DEFAULT fmt, ##__VA_ARGS__)
+# endif
-# define dfprintk_cont(fac, fmt, ...) \
+# define dfprintk(fac, fmt, ...) \
do { \
ifdebug(fac) \
- printk(KERN_CONT fmt, ##__VA_ARGS__); \
+ __sunrpc_printk(fmt, ##__VA_ARGS__); \
} while (0)
# define dfprintk_rcu(fac, fmt, ...) \
do { \
ifdebug(fac) { \
rcu_read_lock(); \
- printk(KERN_DEFAULT fmt, ##__VA_ARGS__); \
- rcu_read_unlock(); \
- } \
-} while (0)
-
-# define dfprintk_rcu_cont(fac, fmt, ...) \
-do { \
- ifdebug(fac) { \
- rcu_read_lock(); \
- printk(KERN_CONT fmt, ##__VA_ARGS__); \
+ __sunrpc_printk(fmt, ##__VA_ARGS__); \
rcu_read_unlock(); \
} \
} while (0)
@@ -68,7 +55,6 @@ do { \
#else
# define ifdebug(fac) if (0)
# define dfprintk(fac, fmt, ...) do {} while (0)
-# define dfprintk_cont(fac, fmt, ...) do {} while (0)
# define dfprintk_rcu(fac, fmt, ...) do {} while (0)
# define RPC_IFDEBUG(x)
#endif
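For illustration, a call site in an RPC facility might look like the sketch below; whether the output lands in the kernel log or the trace buffer is now a build-time choice (CONFIG_SUNRPC_DEBUG_TRACE), not a call-site one:

	#define RPCDBG_FACILITY	RPCDBG_XPRT	/* bit tested by ifdebug() */

	static void demo_log_state_change(int state)
	{
		/* expands to dfprintk(FACILITY, ...), i.e. __sunrpc_printk()
		 * guarded by the RPCDBG_XPRT debug flag
		 */
		dprintk("xprt state changed to %d\n", state);
	}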
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 40cbe81360ed..5506d20857c3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -196,7 +196,7 @@ struct svc_rqst {
struct xdr_buf rq_arg;
struct xdr_stream rq_arg_stream;
struct xdr_stream rq_res_stream;
- struct page *rq_scratch_page;
+ struct folio *rq_scratch_folio;
struct xdr_buf rq_res;
unsigned long rq_maxpages; /* num of entries in rq_pages */
struct page * *rq_pages;
@@ -503,7 +503,7 @@ static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
xdr_init_decode(xdr, buf, argv->iov_base, NULL);
- xdr_set_scratch_page(xdr, rqstp->rq_scratch_page);
+ xdr_set_scratch_folio(xdr, rqstp->rq_scratch_folio);
}
/**
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 369a89aea186..fde60d4e2cd5 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -165,7 +165,8 @@ int svc_xprt_create(struct svc_serv *serv, const char *xprt_name,
struct net *net, const int family,
const unsigned short port, int flags,
const struct cred *cred);
-void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net);
+void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net,
+ bool unregister);
void svc_xprt_received(struct svc_xprt *xprt);
void svc_xprt_enqueue(struct svc_xprt *xprt);
void svc_xprt_put(struct svc_xprt *xprt);
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 8a9ec617cf66..49278749ad0c 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -288,16 +288,16 @@ xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
}
/**
- * xdr_set_scratch_page - Attach a scratch buffer for decoding data
+ * xdr_set_scratch_folio - Attach a scratch buffer for decoding data
* @xdr: pointer to xdr_stream struct
- * @page: an anonymous page
+ * @folio: an anonymous folio
*
* See xdr_set_scratch_buffer().
*/
static inline void
-xdr_set_scratch_page(struct xdr_stream *xdr, struct page *page)
+xdr_set_scratch_folio(struct xdr_stream *xdr, struct folio *folio)
{
- xdr_set_scratch_buffer(xdr, page_address(page), PAGE_SIZE);
+ xdr_set_scratch_buffer(xdr, folio_address(folio), folio_size(folio));
}
/**
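As a sketch, a decode path sets up its stream and scratch space as below, mirroring what svcxdr_init_decode() now does (names hypothetical):

	static void demo_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
				     struct folio *scratch)
	{
		xdr_init_decode(xdr, buf, buf->head[0].iov_base, NULL);
		/* the scratch folio is used when an inline object straddles
		 * a page boundary and must be linearized for the caller
		 */
		xdr_set_scratch_folio(xdr, scratch);
	}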
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 317ae31e89b3..b02876f1ae38 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -418,6 +418,12 @@ static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) {
}
#endif /* CONFIG_HIBERNATION */
+#if defined(CONFIG_HIBERNATION) && defined(CONFIG_SUSPEND)
+bool pm_hibernation_mode_is_suspend(void);
+#else
+static inline bool pm_hibernation_mode_is_suspend(void) { return false; }
+#endif
+
int arch_resume_nosmt(void);
#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV
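A hedged sketch of a driver using the new predicate (the helpers are hypothetical):

	static int demo_hibernate_poweroff(struct device *dev)
	{
		/* In "suspend" hibernation mode the platform stays powered
		 * after the image is written, so a shallower device state may
		 * be preferable to a full power-off.
		 */
		if (pm_hibernation_mode_is_suspend())
			return demo_enter_low_power(dev);	/* hypothetical */
		return demo_power_off(dev);			/* hypothetical */
	}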
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7012a0f758d8..e818fbade1e2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -236,40 +236,6 @@ enum {
#define SWAP_CONT_MAX 0x7f /* Max count */
/*
- * We use this to track usage of a cluster. A cluster is a block of swap disk
- * space with SWAPFILE_CLUSTER pages long and naturally aligns in disk. All
- * free clusters are organized into a list. We fetch an entry from the list to
- * get a free cluster.
- *
- * The flags field determines if a cluster is free. This is
- * protected by cluster lock.
- */
-struct swap_cluster_info {
- spinlock_t lock; /*
- * Protect swap_cluster_info fields
- * other than list, and swap_info_struct->swap_map
- * elements corresponding to the swap cluster.
- */
- u16 count;
- u8 flags;
- u8 order;
- struct list_head list;
-};
-
-/* All on-list cluster must have a non-zero flag. */
-enum swap_cluster_flags {
- CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */
- CLUSTER_FLAG_FREE,
- CLUSTER_FLAG_NONFULL,
- CLUSTER_FLAG_FRAG,
- /* Clusters with flags above are allocatable */
- CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG,
- CLUSTER_FLAG_FULL,
- CLUSTER_FLAG_DISCARD,
- CLUSTER_FLAG_MAX,
-};
-
-/*
* The first page in the swap file is the swap header, which is always marked
* bad to prevent it from being allocated as an entry. This also prevents the
* cluster to which it belongs being marked free. Therefore 0 is safe to use as
@@ -310,7 +276,6 @@ struct swap_info_struct {
/* list of cluster that contains at least one free slot */
struct list_head frag_clusters[SWAP_NR_ORDERS];
/* list of cluster that are fragmented or contented */
- atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS];
unsigned int pages; /* total of usable pages of swap */
atomic_long_t inuse_pages; /* number of those currently in use */
struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */
@@ -321,11 +286,8 @@ struct swap_info_struct {
struct completion comp; /* seldom referenced */
spinlock_t lock; /*
* protect map scan related fields like
- * swap_map, lowest_bit, highest_bit,
- * inuse_pages, cluster_next,
- * cluster_nr, lowest_alloc,
- * highest_alloc, free/discard cluster
- * list. other fields are only changed
+ * swap_map, inuse_pages and all cluster
+ * lists. other fields are only changed
* at swapon/swapoff, so are protected
* by swap_lock. changing flags need
* hold this lock and swap_lock. If
@@ -517,10 +479,7 @@ extern sector_t swapdev_block(int, pgoff_t);
extern int __swap_count(swp_entry_t entry);
extern bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-struct swap_info_struct *swp_swap_info(swp_entry_t entry);
struct backing_dev_info;
-extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
-extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
sector_t swap_folio_sector(struct folio *folio);
@@ -530,11 +489,6 @@ static inline void put_swap_device(struct swap_info_struct *si)
}
#else /* CONFIG_SWAP */
-static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
-{
- return NULL;
-}
-
static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
return NULL;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 57e478bfaef2..20b8c6e21fef 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -122,8 +122,9 @@ struct tcp_options_received {
smc_ok : 1, /* SMC seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
- u8 saw_unknown:1, /* Received unknown option */
- unused:7;
+ u8 accecn:6, /* AccECN index in header, 0=no options */
+ saw_unknown:1, /* Received unknown option */
+ unused:1;
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
@@ -168,6 +169,11 @@ struct tcp_request_sock {
* after data-in-SYN.
*/
u8 syn_tos;
+ bool accecn_ok;
+	u8 syn_ect_snt:2,
+	   syn_ect_rcv:2,
+	   accecn_fail_mode:4;
+	u8 saw_accecn_opt:2;
#ifdef CONFIG_TCP_AO
u8 ao_keyid;
u8 ao_rcv_next;
@@ -209,6 +215,9 @@ struct tcp_sock {
u16 gso_segs; /* Max number of segs per GSO packet */
/* from STCP, retrans queue hinting */
struct sk_buff *retransmit_skb_hint;
+#if defined(CONFIG_TLS_DEVICE)
+ void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
+#endif
__cacheline_group_end(tcp_sock_read_tx);
/* TXRX read-mostly hotpath cache lines */
@@ -226,13 +235,13 @@ struct tcp_sock {
repair : 1,
tcp_usec_ts : 1, /* TSval values in usec */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
+ recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_txrx);
/* RX read-mostly hotpath cache lines */
__cacheline_group_begin(tcp_sock_read_rx);
u32 copied_seq; /* Head of yet unread data */
- u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 snd_wl1; /* Sequence for window update */
u32 tlp_high_seq; /* snd_nxt at the time of TLP */
u32 rttvar_us; /* smoothed mdev_max */
@@ -240,14 +249,10 @@ struct tcp_sock {
u16 advmss; /* Advertised MSS */
u16 urg_data; /* Saved octet of OOB data and control flags */
u32 lost; /* Total data packets lost incl. rexmits */
+ u32 snd_ssthresh; /* Slow start size threshold */
struct minmax rtt_min;
/* OOO segments go in this rbtree. Socket lock must be held. */
struct rb_root out_of_order_queue;
-#if defined(CONFIG_TLS_DEVICE)
- void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq);
-#endif
- u32 snd_ssthresh; /* Slow start size threshold */
- u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */
__cacheline_group_end(tcp_sock_read_rx);
/* TX read-write hotpath cache lines */
@@ -270,6 +275,7 @@ struct tcp_sock {
u32 mdev_us; /* medium deviation */
u32 rtt_seq; /* sequence number to update rttvar */
u64 tcp_wstamp_ns; /* departure time for next sent data packet */
+ u64 accecn_opt_tstamp; /* Last AccECN option sent timestamp */
struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
struct sk_buff *highest_sack; /* skb just after the highest
* skb with SACKed bit set
@@ -285,6 +291,14 @@ struct tcp_sock {
* Header prediction flags
* 0x5?10 << 16 + snd_wnd in net byte order
*/
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
+	u8 received_ce_pending:4, /* Not yet transmitted cnt of received_ce */
+ unused2:4;
+ u8 accecn_minlen:2,/* Minimum length of AccECN option sent */
+ est_ecnfield:2,/* ECN field for AccECN delivered estimates */
+ accecn_opt_demand:2,/* Demand AccECN option for n next ACKs */
+ prev_ecnfield:2; /* ECN bits from the previous segment */
__be32 pred_flags;
u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
u64 tcp_mstamp; /* most recent packet received/sent */
@@ -297,14 +311,18 @@ struct tcp_sock {
u32 snd_up; /* Urgent pointer */
u32 delivered; /* Total data packets delivered incl. rexmits */
u32 delivered_ce; /* Like the above but only ECE marked packets */
+ u32 received_ce; /* Like the above but for rcvd CE marked pkts */
+ u32 received_ecn_bytes[3]; /* received byte counters for three ECN
+ * types: INET_ECN_ECT_1, INET_ECN_ECT_0,
+ * and INET_ECN_CE
+ */
u32 app_limited; /* limited until "delivered" reaches this val */
u32 rcv_wnd; /* Current receiver window */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
struct tcp_options_received rx_opt;
- u8 nonagle : 4,/* Disable Nagle algorithm? */
- rate_app_limited:1; /* rate_{delivered,interval_us} limited? */
__cacheline_group_end(tcp_sock_write_txrx);
/* RX read-write hotpath cache lines */
@@ -326,6 +344,7 @@ struct tcp_sock {
u32 rate_delivered; /* saved rate sample: packets delivered */
u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_rtt_last_tsecr;
+ u32 delivered_ecn_bytes[3];
u64 first_tx_mstamp; /* start of window send phase */
u64 delivered_mstamp; /* time we reached "delivered" */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
@@ -372,7 +391,8 @@ struct tcp_sock {
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
- unused:5;
+ syn_ect_snt:2, /* AccECN ECT memory, only */
+ syn_ect_rcv:2; /* ... needed during 3WHS + first seqno */
u8 thin_lto : 1,/* Use linear timeouts for thin streams */
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
@@ -388,6 +408,8 @@ struct tcp_sock {
syn_fastopen_child:1; /* created TFO passive child socket */
u8 keepalive_probes; /* num of allowed keep alive probes */
+ u8 accecn_fail_mode:4, /* AccECN failure handling */
+ saw_accecn_opt:2; /* An AccECN option was seen */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
/* RTT measurement */
@@ -426,6 +448,9 @@ struct tcp_sock {
* the first SYN. */
u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
+ u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
+ * while socket was owned by user.
+ */
u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
* Total data bytes retransmitted
*/
@@ -472,9 +497,6 @@ struct tcp_sock {
u32 probe_seq_end;
} mtu_probe;
u32 plb_rehash; /* PLB-triggered rehash attempts */
- u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
- * while socket was owned by user.
- */
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h
index a38494d6b5f4..1f3e5dad6d0d 100644
--- a/include/linux/tee_core.h
+++ b/include/linux/tee_core.h
@@ -8,9 +8,11 @@
#include <linux/cdev.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/scatterlist.h>
#include <linux/tee.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
@@ -26,10 +28,19 @@
#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */
#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */
#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */
+#define TEE_SHM_DMA_BUF BIT(4) /* Memory with dma-buf handle */
+#define TEE_SHM_DMA_MEM BIT(5) /* Memory allocated with */
+ /* dma_alloc_pages() */
#define TEE_DEVICE_FLAG_REGISTERED 0x1
#define TEE_MAX_DEV_NAME_LEN 32
+enum tee_dma_heap_id {
+ TEE_DMA_HEAP_SECURE_VIDEO_PLAY = 1,
+ TEE_DMA_HEAP_TRUSTED_UI,
+ TEE_DMA_HEAP_SECURE_VIDEO_RECORD,
+};
+
/**
* struct tee_device - TEE Device representation
* @name: name of device
@@ -65,22 +76,30 @@ struct tee_device {
/**
* struct tee_driver_ops - driver operations vtable
* @get_version: returns version of driver
- * @open: called when the device file is opened
- * @release: release this open file
+ * @open: called for a context when the device file is opened
+ * @close_context: called when the device file is closed
+ * @release: called to release the context
* @open_session: open a new session
* @close_session: close a session
* @system_session: declare session as a system session
* @invoke_func: invoke a trusted function
+ * @object_invoke_func: invoke a TEE object
* @cancel_req: request cancel of an ongoing invoke or open
* @supp_recv: called for supplicant to get a command
* @supp_send: called for supplicant to send a response
* @shm_register: register shared memory buffer in TEE
* @shm_unregister: unregister shared memory buffer in TEE
+ *
+ * The context given to @open might last longer than the device file if it is
+ * tied to other resources in the TEE driver. @close_context is called when the
+ * client closes the device file, even if there are existing references to the
+ * context. The TEE driver can use @close_context to start cleaning up.
*/
struct tee_driver_ops {
void (*get_version)(struct tee_device *teedev,
struct tee_ioctl_version_data *vers);
int (*open)(struct tee_context *ctx);
+ void (*close_context)(struct tee_context *ctx);
void (*release)(struct tee_context *ctx);
int (*open_session)(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
@@ -90,6 +109,9 @@ struct tee_driver_ops {
int (*invoke_func)(struct tee_context *ctx,
struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
+ int (*object_invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_object_invoke_arg *arg,
+ struct tee_param *param);
int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
struct tee_param *param);
@@ -117,6 +139,36 @@ struct tee_desc {
};
/**
+ * struct tee_protmem_pool - protected memory pool
+ * @ops: operations
+ *
+ * This is an abstract interface where this struct is expected to be
+ * embedded in another struct specific to the implementation.
+ */
+struct tee_protmem_pool {
+ const struct tee_protmem_pool_ops *ops;
+};
+
+/**
+ * struct tee_protmem_pool_ops - protected memory pool operations
+ * @alloc: called when allocating protected memory
+ * @free: called when freeing protected memory
+ * @update_shm: called when registering a dma-buf to update the @shm
+ * with the physical address of the buffer or to return the
+ * @parent_shm of the memory pool
+ * @destroy_pool: called when destroying the pool
+ */
+struct tee_protmem_pool_ops {
+ int (*alloc)(struct tee_protmem_pool *pool, struct sg_table *sgt,
+ size_t size, size_t *offs);
+ void (*free)(struct tee_protmem_pool *pool, struct sg_table *sgt);
+ int (*update_shm)(struct tee_protmem_pool *pool, struct sg_table *sgt,
+ size_t offs, struct tee_shm *shm,
+ struct tee_shm **parent_shm);
+ void (*destroy_pool)(struct tee_protmem_pool *pool);
+};
+
+/**
* tee_device_alloc() - Allocate a new struct tee_device instance
* @teedesc: Descriptor for this driver
* @dev: Parent device for this device
@@ -154,6 +206,29 @@ int tee_device_register(struct tee_device *teedev);
*/
void tee_device_unregister(struct tee_device *teedev);
+int tee_device_register_dma_heap(struct tee_device *teedev,
+ enum tee_dma_heap_id id,
+ struct tee_protmem_pool *pool);
+void tee_device_put_all_dma_heaps(struct tee_device *teedev);
+
+/**
+ * tee_device_get() - Increment the user count for a tee_device
+ * @teedev: Pointer to the tee_device
+ *
+ * If tee_device_unregister() has been called and the final user of @teedev
+ * has already released the device, this function will fail to prevent new users
+ * from accessing the device during the unregistration process.
+ *
+ * Returns: true if @teedev remains valid, otherwise false
+ */
+bool tee_device_get(struct tee_device *teedev);
+
+/**
+ * tee_device_put() - Decrease the user count for a tee_device
+ * @teedev: pointer to the tee_device
+ */
+void tee_device_put(struct tee_device *teedev);
+
/**
* tee_device_set_dev_groups() - Set device attribute groups
* @teedev: Device to register
@@ -230,6 +305,16 @@ static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
}
/**
+ * tee_protmem_static_pool_alloc() - Create a protected memory manager
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ *
+ * @returns pointer to a 'struct tee_protmem_pool' or an ERR_PTR on failure.
+ */
+struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr,
+ size_t size);
+
+/**
* tee_get_drvdata() - Return driver_data pointer
* @returns the driver_data pointer supplied to tee_register().
*/
@@ -244,6 +329,9 @@ void *tee_get_drvdata(struct tee_device *teedev);
*/
struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count);
+
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
@@ -315,4 +403,25 @@ struct tee_context *teedev_open(struct tee_device *teedev);
*/
void teedev_close_context(struct tee_context *ctx);
+/**
+ * teedev_ctx_get() - Increment the reference count of a context
+ * @ctx: Pointer to the context
+ *
+ * This function increases the refcount of the context, which is tied to
+ * resources shared by the same tee_device. During the unregistration process,
+ * the context may remain valid even after tee_device_unregister() has returned.
+ *
+ * Users should ensure that the context's refcount is properly decreased before
+ * calling tee_device_put(), typically within the context's release() function.
+ * Alternatively, users can call tee_device_get() and teedev_ctx_get() together
+ * and release them simultaneously (see shm_alloc_helper()).
+ */
+void teedev_ctx_get(struct tee_context *ctx);
+
+/**
+ * teedev_ctx_put() - Decrease reference count on a context
+ * @ctx: pointer to the context
+ */
+void teedev_ctx_put(struct tee_context *ctx);
+
#endif /*__TEE_CORE_H*/
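A sketch of the reference pairing the comments above describe (illustrative only; error handling abbreviated):

	static int demo_use_ctx(struct tee_context *ctx)
	{
		/* take the device and context references together ... */
		if (!tee_device_get(ctx->teedev))
			return -EINVAL;
		teedev_ctx_get(ctx);

		/* ... operate on resources shared with the TEE driver ... */

		/* ... and drop them together, context first */
		teedev_ctx_put(ctx);
		tee_device_put(ctx->teedev);
		return 0;
	}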
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index a54c203000ed..88a6f9697c89 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -82,6 +82,16 @@ struct tee_param_memref {
struct tee_shm *shm;
};
+struct tee_param_ubuf {
+ void __user *uaddr;
+ size_t size;
+};
+
+struct tee_param_objref {
+ u64 id;
+ u64 flags;
+};
+
struct tee_param_value {
u64 a;
u64 b;
@@ -92,6 +102,8 @@ struct tee_param {
u64 attr;
union {
struct tee_param_memref memref;
+ struct tee_param_objref objref;
+ struct tee_param_ubuf ubuf;
struct tee_param_value value;
} u;
};
@@ -117,6 +129,16 @@ struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
void *addr, size_t length);
/**
+ * tee_shm_register_fd() - Register shared memory from file descriptor
+ *
+ * @ctx: Context that allocates the shared memory
+ * @fd: Shared memory file descriptor reference
+ *
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd);
+
+/**
* tee_shm_free() - Free shared memory
* @shm: Handle to shared memory to free
*/
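An illustrative caller of the new registration helper (the fd is assumed to reference a dma-buf usable by the TEE):

	static int demo_register_dmabuf(struct tee_context *ctx, int fd)
	{
		struct tee_shm *shm = tee_shm_register_fd(ctx, fd);

		if (IS_ERR(shm))
			return PTR_ERR(shm);

		/* ... pass shm in memref params to open_session()/invoke_func() ... */

		tee_shm_free(shm);
		return 0;
	}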
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 4e1a672af4c5..58795688a186 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -44,6 +44,12 @@ enum {
UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
};
+/* Per-NUMA-node structure for lockless producer usage. */
+struct udp_prod_queue {
+ struct llist_head ll_root ____cacheline_aligned_in_smp;
+ atomic_t rmem_alloc;
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
@@ -90,6 +96,8 @@ struct udp_sock {
struct sk_buff *skb,
int nhoff);
+ struct udp_prod_queue *udp_prod_queue;
+
/* udp_recvmsg try to use this before splicing sk_receive_queue */
struct sk_buff_head reader_queue ____cacheline_aligned_in_smp;
@@ -108,6 +116,7 @@ struct udp_sock {
* the last UDP socket cacheline.
*/
struct hlist_node tunnel_list;
+ struct numa_drop_counters drop_counters;
};
#define udp_test_bit(nr, sk) \
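A sketch of the lockless-producer pattern this structure enables (not the actual stack code): a producer on any CPU queues an skb onto the per-NUMA-node llist without taking the receive queue lock, and the consumer later splices the list in one shot.

	static void demo_produce(struct udp_prod_queue *q, struct sk_buff *skb)
	{
		/* charge receive memory before publishing the skb */
		atomic_add(skb->truesize, &q->rmem_alloc);
		llist_add(&skb->ll_node, &q->ll_root);
	}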
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 2e86c653186c..5b127043a151 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -286,8 +286,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#endif
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
-bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
- unsigned len_mask);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index ee19e9f915b8..22e0dab0809e 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -29,10 +29,32 @@
#define UVC_GUID_EXT_GPIO_CONTROLLER \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+#define UVC_GUID_CHROMEOS_XU \
+ {0x24, 0xe9, 0xd7, 0x74, 0xc9, 0x49, 0x45, 0x4a, \
+ 0x98, 0xa3, 0xc8, 0x07, 0x7e, 0x05, 0x1c, 0xa3}
#define UVC_GUID_MSXU_1_5 \
{0xdc, 0x95, 0x3f, 0x0f, 0x32, 0x26, 0x4e, 0x4c, \
0x92, 0xc9, 0xa0, 0x47, 0x82, 0xf4, 0x3b, 0xc8}
+/* https://learn.microsoft.com/en-us/windows-hardware/drivers/stream/uvc-extensions-1-5#222-extension-unit-controls */
+#define UVC_MSXU_CONTROL_FOCUS 0x01
+#define UVC_MSXU_CONTROL_EXPOSURE 0x02
+#define UVC_MSXU_CONTROL_EVCOMPENSATION 0x03
+#define UVC_MSXU_CONTROL_WHITEBALANCE 0x04
+#define UVC_MSXU_CONTROL_FACE_AUTHENTICATION 0x06
+#define UVC_MSXU_CONTROL_CAMERA_EXTRINSICS 0x07
+#define UVC_MSXU_CONTROL_CAMERA_INTRINSICS 0x08
+#define UVC_MSXU_CONTROL_METADATA 0x09
+#define UVC_MSXU_CONTROL_IR_TORCH 0x0a
+#define UVC_MSXU_CONTROL_DIGITALWINDOW 0x0b
+#define UVC_MSXU_CONTROL_DIGITALWINDOW_CONFIG 0x0c
+#define UVC_MSXU_CONTROL_VIDEO_HDR 0x0d
+#define UVC_MSXU_CONTROL_FRAMERATE_THROTTLE 0x0e
+#define UVC_MSXU_CONTROL_FIELDOFVIEW2_CONFIG 0x0f
+#define UVC_MSXU_CONTROL_FIELDOFVIEW2 0x10
+
+#define UVC_CROSXU_CONTROL_IQ_PROFILE 0x04
+
#define UVC_GUID_FORMAT_MJPEG \
{ 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 219037f4c08d..9609cf365e8e 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -50,7 +50,7 @@
*
* Author: Bill Dirks <bill@thedirks.org>
* Justin Schoeman
- * Hans Verkuil <hverkuil@xs4all.nl>
+ * Hans Verkuil <hverkuil@kernel.org>
* et al.
*/
#ifndef __LINUX_VIDEODEV2_H
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 9e15a088ba38..92f80b4d69a6 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -2,6 +2,8 @@
#ifndef VM_EVENT_ITEM_H_INCLUDED
#define VM_EVENT_ITEM_H_INCLUDED
+#include <linux/thread_info.h>
+
#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 2759dac6be44..eb54b7b3202f 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -197,9 +197,15 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
-void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
- __realloc_size(2);
-#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+void *__must_check vrealloc_node_align_noprof(const void *p, size_t size,
+ unsigned long align, gfp_t flags, int nid) __realloc_size(2);
+#define vrealloc_node_noprof(_p, _s, _f, _nid) \
+ vrealloc_node_align_noprof(_p, _s, 1, _f, _nid)
+#define vrealloc_noprof(_p, _s, _f) \
+ vrealloc_node_align_noprof(_p, _s, 1, _f, NUMA_NO_NODE)
+#define vrealloc_node_align(...) alloc_hooks(vrealloc_node_align_noprof(__VA_ARGS__))
+#define vrealloc_node(...) alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
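For illustration, growing a vmalloc'ed buffer on a preferred NUMA node now takes one call; vrealloc() keeps its old behaviour, and the _node/_node_align variants are thin wrappers forwarding alignment and node id:

	static void *demo_grow(void *buf, size_t new_size, int nid)
	{
		/* equivalent to vrealloc_node_align(buf, new_size, 1,
		 * GFP_KERNEL, nid)
		 */
		return vrealloc_node(buf, new_size, GFP_KERNEL, nid);
	}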
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 09855d819418..f648044466d5 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -965,6 +965,18 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
__ret; \
})
+#define __wait_event_state_exclusive(wq, condition, state) \
+ ___wait_event(wq, condition, state, 1, 0, schedule())
+
+#define wait_event_state_exclusive(wq, condition, state) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_state_exclusive(wq, condition, state); \
+ __ret; \
+})
+
#define __wait_event_killable_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
TASK_KILLABLE, 0, timeout, \
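A sketch of an exclusive waiter using the new macro (hypothetical resource pool): only one waiter is woken per event (exclusive == 1 in ___wait_event()), and the sleep state is caller-chosen.

	static int demo_wait_for_slot(wait_queue_head_t *wq, atomic_t *free_slots)
	{
		/* returns -ERESTARTSYS if a fatal signal arrives first */
		return wait_event_state_exclusive(*wq,
						  atomic_read(free_slots) > 0,
						  TASK_KILLABLE);
	}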
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 15a4bc4ab819..22dd4adc5667 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -362,12 +362,6 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb);
struct folio *writeback_iter(struct address_space *mapping,
struct writeback_control *wbc, struct folio *folio, int *error);
-typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
- void *data);
-
-int write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc, writepage_t writepage,
- void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
deleted file mode 100644
index 369ef068fad8..000000000000
--- a/include/linux/zpool.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * zpool memory storage api
- *
- * Copyright (C) 2014 Dan Streetman
- *
- * This is a common frontend for the zswap compressed memory storage
- * implementations.
- */
-
-#ifndef _ZPOOL_H_
-#define _ZPOOL_H_
-
-struct zpool;
-
-bool zpool_has_pool(char *type);
-
-struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp);
-
-const char *zpool_get_type(struct zpool *pool);
-
-void zpool_destroy_pool(struct zpool *pool);
-
-int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
- unsigned long *handle, const int nid);
-
-void zpool_free(struct zpool *pool, unsigned long handle);
-
-void *zpool_obj_read_begin(struct zpool *zpool, unsigned long handle,
- void *local_copy);
-
-void zpool_obj_read_end(struct zpool *zpool, unsigned long handle,
- void *handle_mem);
-
-void zpool_obj_write(struct zpool *zpool, unsigned long handle,
- void *handle_mem, size_t mem_len);
-
-u64 zpool_get_total_pages(struct zpool *pool);
-
-
-/**
- * struct zpool_driver - driver implementation for zpool
- * @type: name of the driver.
- * @list: entry in the list of zpool drivers.
- * @create: create a new pool.
- * @destroy: destroy a pool.
- * @malloc: allocate mem from a pool.
- * @free: free mem from a pool.
- * @sleep_mapped: whether zpool driver can sleep during map.
- * @map: map a handle.
- * @unmap: unmap a handle.
- * @total_size: get total size of a pool.
- *
- * This is created by a zpool implementation and registered
- * with zpool.
- */
-struct zpool_driver {
- char *type;
- struct module *owner;
- atomic_t refcount;
- struct list_head list;
-
- void *(*create)(const char *name, gfp_t gfp);
- void (*destroy)(void *pool);
-
- int (*malloc)(void *pool, size_t size, gfp_t gfp,
- unsigned long *handle, const int nid);
- void (*free)(void *pool, unsigned long handle);
-
- void *(*obj_read_begin)(void *pool, unsigned long handle,
- void *local_copy);
- void (*obj_read_end)(void *pool, unsigned long handle,
- void *handle_mem);
- void (*obj_write)(void *pool, unsigned long handle,
- void *handle_mem, size_t mem_len);
-
- u64 (*total_pages)(void *pool);
-};
-
-void zpool_register_driver(struct zpool_driver *driver);
-
-int zpool_unregister_driver(struct zpool_driver *driver);
-
-bool zpool_can_sleep_mapped(struct zpool *pool);
-
-#endif
diff --git a/include/media/cadence/cdns-csi2rx.h b/include/media/cadence/cdns-csi2rx.h
new file mode 100644
index 000000000000..782d03fc36d1
--- /dev/null
+++ b/include/media/cadence/cdns-csi2rx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _CDNS_CSI2RX_H
+#define _CDNS_CSI2RX_H
+
+#include <media/v4l2-subdev.h>
+
+/**
+ * cdns_csi2rx_negotiate_ppc - Negotiate pixel-per-clock on output interface
+ *
+ * @subdev: pointer to &struct v4l2_subdev
+ * @pad: pad number of the source pad
+ * @ppc: pointer to requested pixel-per-clock value
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
+ u8 *ppc);
+
+#endif
diff --git a/include/media/drv-intf/cx25840.h b/include/media/drv-intf/cx25840.h
index ba69bc525382..8b455d9dd5ca 100644
--- a/include/media/drv-intf/cx25840.h
+++ b/include/media/drv-intf/cx25840.h
@@ -3,7 +3,7 @@
/*
* cx25840.h - definition for cx25840/1/2/3 inputs
*
- * Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ * Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
#ifndef _CX25840_H_
diff --git a/include/media/drv-intf/msp3400.h b/include/media/drv-intf/msp3400.h
index d6dfae104a6f..853258ee6bbd 100644
--- a/include/media/drv-intf/msp3400.h
+++ b/include/media/drv-intf/msp3400.h
@@ -2,7 +2,7 @@
/*
msp3400.h - definition for msp3400 inputs and outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/bt819.h b/include/media/i2c/bt819.h
index 70aa46bd5182..2277a7eb9548 100644
--- a/include/media/i2c/bt819.h
+++ b/include/media/i2c/bt819.h
@@ -2,7 +2,7 @@
/*
bt819.h - bt819 notifications
- Copyright (C) 2009 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2009 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/cs5345.h b/include/media/i2c/cs5345.h
index d41e4dca3fcc..39e1cf6c1a2f 100644
--- a/include/media/i2c/cs5345.h
+++ b/include/media/i2c/cs5345.h
@@ -2,7 +2,7 @@
/*
cs5345.h - definition for cs5345 inputs and outputs
- Copyright (C) 2007 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2007 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/cs53l32a.h b/include/media/i2c/cs53l32a.h
index 52ceb2f916d3..777f667855cb 100644
--- a/include/media/i2c/cs53l32a.h
+++ b/include/media/i2c/cs53l32a.h
@@ -2,7 +2,7 @@
/*
cs53l32a.h - definition for cs53l32a inputs and outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/m52790.h b/include/media/i2c/m52790.h
index 3f214fa9bc64..cedaaf215273 100644
--- a/include/media/i2c/m52790.h
+++ b/include/media/i2c/m52790.h
@@ -2,7 +2,7 @@
/*
m52790.h - definition for m52790 inputs and outputs
- Copyright (C) 2007 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2007 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/mt9v011.h b/include/media/i2c/mt9v011.h
index 41c00b3e7184..552839756e64 100644
--- a/include/media/i2c/mt9v011.h
+++ b/include/media/i2c/mt9v011.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* mt9v011 sensor
*
- * Copyright (C) 2011 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2011 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef __MT9V011_H__
diff --git a/include/media/i2c/mt9v022.h b/include/media/i2c/mt9v022.h
deleted file mode 100644
index 6966eb538165..000000000000
--- a/include/media/i2c/mt9v022.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * mt9v022 sensor
- */
-
-#ifndef __MT9V022_H__
-#define __MT9V022_H__
-
-struct mt9v022_platform_data {
- unsigned short y_skip_top; /* Lines to skip at the top */
-};
-
-#endif
diff --git a/include/media/i2c/mt9v032.h b/include/media/i2c/mt9v032.h
deleted file mode 100644
index 83a37ccfb649..000000000000
--- a/include/media/i2c/mt9v032.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MEDIA_MT9V032_H
-#define _MEDIA_MT9V032_H
-
-struct mt9v032_platform_data {
- unsigned int clk_pol:1;
-
- const s64 *link_freqs;
- s64 link_def_freq;
-};
-
-#endif
diff --git a/include/media/i2c/saa7115.h b/include/media/i2c/saa7115.h
index 0cd6080d7cb1..a607c91ef5f3 100644
--- a/include/media/i2c/saa7115.h
+++ b/include/media/i2c/saa7115.h
@@ -2,7 +2,7 @@
/*
saa7115.h - definition for saa7111/3/4/5 inputs and frequency flags
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/saa7127.h b/include/media/i2c/saa7127.h
index 53ee999e6090..c81ee1743df1 100644
--- a/include/media/i2c/saa7127.h
+++ b/include/media/i2c/saa7127.h
@@ -2,7 +2,7 @@
/*
saa7127.h - definition for saa7126/7/8/9 inputs/outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/ths7303.h b/include/media/i2c/ths7303.h
index fc937025cdb4..7eda467b6725 100644
--- a/include/media/i2c/ths7303.h
+++ b/include/media/i2c/ths7303.h
@@ -5,7 +5,7 @@
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
*
* Contributors:
- * Hans Verkuil <hansverk@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
*/
diff --git a/include/media/i2c/tvaudio.h b/include/media/i2c/tvaudio.h
index 42cd3206fb6c..206f42ed4e69 100644
--- a/include/media/i2c/tvaudio.h
+++ b/include/media/i2c/tvaudio.h
@@ -2,7 +2,7 @@
/*
tvaudio.h - definition for tvaudio inputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/i2c/upd64031a.h b/include/media/i2c/upd64031a.h
index b6570abc84ef..d39b2b7f0cf3 100644
--- a/include/media/i2c/upd64031a.h
+++ b/include/media/i2c/upd64031a.h
@@ -2,7 +2,7 @@
/*
* upd64031a - NEC Electronics Ghost Reduction input defines
*
- * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
+ * 2006 by Hans Verkuil (hverkuil@kernel.org)
*/
#ifndef _UPD64031A_H_
diff --git a/include/media/i2c/upd64083.h b/include/media/i2c/upd64083.h
index 17fb7b5201cc..72cf547c25fc 100644
--- a/include/media/i2c/upd64083.h
+++ b/include/media/i2c/upd64083.h
@@ -2,7 +2,7 @@
/*
* upd6408x - NEC Electronics 3-Dimensional Y/C separation input defines
*
- * 2006 by Hans Verkuil (hverkuil@xs4all.nl)
+ * 2006 by Hans Verkuil (hverkuil@kernel.org)
*/
#ifndef _UPD64083_H_
diff --git a/include/media/i2c/wm8775.h b/include/media/i2c/wm8775.h
index 6ccdeb3817ab..a02695ee3a58 100644
--- a/include/media/i2c/wm8775.h
+++ b/include/media/i2c/wm8775.h
@@ -2,7 +2,7 @@
/*
wm8775.h - definition for wm8775 inputs and outputs
- Copyright (C) 2006 Hans Verkuil (hverkuil@xs4all.nl)
+ Copyright (C) 2006 Hans Verkuil (hverkuil@kernel.org)
*/
diff --git a/include/media/media-request.h b/include/media/media-request.h
index d4ac557678a7..bb500b2f9da4 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -5,7 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
* Copyright (C) 2018 Intel Corporation
*
- * Author: Hans Verkuil <hansverk@cisco.com>
+ * Author: Hans Verkuil <hverkuil@kernel.org>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
*/
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 0a43f56578bc..5c0a7f6b5bb6 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -7,7 +7,7 @@
Each ioctl begins with VIDIOC_INT_ to clearly mark that it is an internal
define,
- Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -97,6 +97,7 @@ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl,
/* ------------------------------------------------------------------------- */
+struct clk;
struct v4l2_device;
struct v4l2_subdev;
struct v4l2_subdev_ops;
@@ -559,17 +560,18 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
/**
* v4l2_get_link_freq - Get link rate from transmitter
*
- * @pad: The transmitter's media pad (or control handler for non-MC users or
- * compatibility reasons, don't use in new code)
+ * @pad: The transmitter's media pad
* @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
* D-PHY, samples per clock on parallel. 0 otherwise.
* @div: The divisor between pixel rate and link frequency. Number of data lanes
* times two on D-PHY, 1 on parallel. 0 otherwise.
*
- * This function is intended for obtaining the link frequency from the
- * transmitter sub-devices. It returns the link rate, either from the
- * V4L2_CID_LINK_FREQ control implemented by the transmitter, or value
- * calculated based on the V4L2_CID_PIXEL_RATE implemented by the transmitter.
+ * This function obtains and returns the link frequency from the transmitter
+ * sub-device's pad. The link frequency is retrieved using the get_mbus_config
+ * sub-device pad operation. If this fails, the function falls back to obtaining
+ * the frequency either directly from the V4L2_CID_LINK_FREQ control if
+ * implemented by the transmitter, or by calculating it from the pixel rate
+ * obtained from the V4L2_CID_PIXEL_RATE control.
*
* Return:
* * >0: Link frequency
@@ -577,19 +579,9 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
* * %-EINVAL: Invalid link frequency value
*/
#ifdef CONFIG_MEDIA_CONTROLLER
-#define v4l2_get_link_freq(pad, mul, div) \
- _Generic(pad, \
- struct media_pad *: __v4l2_get_link_freq_pad, \
- struct v4l2_ctrl_handler *: __v4l2_get_link_freq_ctrl) \
- (pad, mul, div)
-s64 __v4l2_get_link_freq_pad(struct media_pad *pad, unsigned int mul,
- unsigned int div);
-#else
-#define v4l2_get_link_freq(handler, mul, div) \
- __v4l2_get_link_freq_ctrl(handler, mul, div)
+s64 v4l2_get_link_freq(const struct media_pad *pad, unsigned int mul,
+ unsigned int div);
#endif
-s64 __v4l2_get_link_freq_ctrl(struct v4l2_ctrl_handler *handler,
- unsigned int mul, unsigned int div);
void v4l2_simplify_fraction(u32 *numerator, u32 *denominator,
unsigned int n_terms, unsigned int threshold);
@@ -620,6 +612,79 @@ int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs,
unsigned int num_of_driver_link_freqs,
unsigned long *bitmap);
+struct clk *__devm_v4l2_sensor_clk_get(struct device *dev, const char *id,
+ bool legacy, bool fixed_rate,
+ unsigned long clk_rate);
+
+/**
+ * devm_v4l2_sensor_clk_get - lookup and obtain a reference to a clock producer
+ * for a camera sensor
+ *
+ * @dev: device for v4l2 sensor clock "consumer"
+ * @id: clock consumer ID
+ *
+ * This function behaves the same way as devm_clk_get() except where there
+ * is no clock producer like in ACPI-based platforms.
+ *
+ * For ACPI-based platforms, the function will read the "clock-frequency"
+ * ACPI _DSD property and register a fixed-clock with the frequency indicated
+ * in the property.
+ *
+ * This function also handles the special ACPI-based system case where:
+ *
+ * * The clock-frequency _DSD property is present.
+ * * A reference to the clock producer is present, where the clock is provided
+ * by a camera sensor PMIC driver (e.g. int3472/tps68470.c)
+ *
+ * In this case try to set the clock-frequency value to the provided clock.
+ *
+ * As the name indicates, this function may only be used on camera sensor
+ * devices. This is because generally only camera sensors need a clock whose
+ * frequency can be queried, due to the requirement to configure the PLL for a
+ * given CSI-2 interface frequency where the sensor's external clock frequency
+ * is a factor. Additionally, the clock frequency tends to be available on ACPI
+ * firmware based systems for camera sensors specifically (if e.g. DisCo for
+ * Imaging compliant).
+ *
+ * Returns a pointer to a struct clk on success or an error pointer on failure.
+ */
+static inline struct clk *
+devm_v4l2_sensor_clk_get(struct device *dev, const char *id)
+{
+ return __devm_v4l2_sensor_clk_get(dev, id, false, false, 0);
+}
+
+/**
+ * devm_v4l2_sensor_clk_get_legacy - lookup and obtain a reference to a clock
+ * producer for a camera sensor.
+ *
+ * @dev: device for v4l2 sensor clock "consumer"
+ * @id: clock consumer ID
+ * @fixed_rate: interpret the @clk_rate as a fixed rate or default rate
+ * @clk_rate: the clock rate
+ *
+ * This function behaves the same way as devm_v4l2_sensor_clk_get() except that
+ * it extends the behaviour on ACPI platforms to all platforms.
+ *
+ * The function also provides the ability to set the clock rate to a fixed
+ * frequency by setting @fixed_rate to true and specifying the fixed frequency
+ * in @clk_rate, or to use a default clock rate when the "clock-frequency"
+ * property is absent by setting @fixed_rate to false and specifying the default
+ * frequency in @clk_rate. Setting @fixed_rate to true and @clk_rate to 0 is an
+ * error.
+ *
+ * This function is meant to support legacy behaviour in existing drivers only.
+ * It must not be used in any new driver.
+ *
+ * Returns a pointer to a struct clk on success or an error pointer on failure.
+ */
+static inline struct clk *
+devm_v4l2_sensor_clk_get_legacy(struct device *dev, const char *id,
+ bool fixed_rate, unsigned long clk_rate)
+{
+ return __devm_v4l2_sensor_clk_get(dev, id, true, fixed_rate, clk_rate);
+}
+
static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
{
/*
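An illustrative sensor probe fragment using the new helper; the same code works on DT systems (real clock provider) and ACPI systems (rate taken from the "clock-frequency" _DSD property):

	static int demo_sensor_probe(struct i2c_client *client)
	{
		struct clk *xclk = devm_v4l2_sensor_clk_get(&client->dev, NULL);

		if (IS_ERR(xclk))
			return PTR_ERR(xclk);

		/* PLL setup would use clk_get_rate(xclk) as input frequency */
		return 0;
	}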
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index c32c46286441..31fc1bee3797 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -2,7 +2,7 @@
/*
* V4L2 controls support header.
*
- * Copyright (C) 2010 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2010 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_CTRLS_H
@@ -1313,13 +1313,13 @@ void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new);
* v4l2_ctrl_log_status - helper function to implement %VIDIOC_LOG_STATUS ioctl
*
* @file: pointer to struct file
- * @fh: unused. Kept just to be compatible to the arguments expected by
+ * @priv: unused. Kept just to be compatible to the arguments expected by
* &struct v4l2_ioctl_ops.vidioc_log_status.
*
* Can be used as a vidioc_log_status function that just dumps all controls
* associated with the filehandle.
*/
-int v4l2_ctrl_log_status(struct file *file, void *fh);
+int v4l2_ctrl_log_status(struct file *file, void *priv);
/**
* v4l2_ctrl_subscribe_event - Subscribes to an event
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index a69801274800..a213c3398dcf 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -74,7 +74,7 @@ struct dentry;
* @V4L2_FL_USES_V4L2_FH:
* indicates that file->private_data points to &struct v4l2_fh.
* This flag is set by the core when v4l2_fh_init() is called.
- * All new drivers should use it.
+ * All drivers must use it.
* @V4L2_FL_QUIRK_INVERTED_CROP:
* some old M2M drivers use g/s_crop/cropcap incorrectly: crop and
* compose are swapped. If this flag is set, then the selection
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index dd897a362f36..25f69b1b8db0 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -2,7 +2,7 @@
/*
V4L2 device support header.
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
index 714075c72f77..2b42e5d81f9e 100644
--- a/include/media/v4l2-dv-timings.h
+++ b/include/media/v4l2-dv-timings.h
@@ -275,6 +275,7 @@ int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
#define V4L2_DEBUGFS_IF_AUDIO BIT(1)
#define V4L2_DEBUGFS_IF_SPD BIT(2)
#define V4L2_DEBUGFS_IF_HDMI BIT(3)
+#define V4L2_DEBUGFS_IF_DRM BIT(4)
typedef ssize_t (*v4l2_debugfs_if_read_t)(u32 type, void *priv,
struct file *filp, char __user *ubuf,
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index b5b3e00c8e6a..aad4b3689d7e 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -3,7 +3,7 @@
* v4l2-fh.h
*
* V4L2 file handle. Store per file handle data for the V4L2
- * framework. Using file handles is optional for the drivers.
+ * framework. Using file handles is mandatory for the drivers.
*
* Copyright (C) 2009--2010 Nokia Corporation.
*
@@ -57,6 +57,20 @@ struct v4l2_fh {
};
/**
+ * file_to_v4l2_fh - Return the v4l2_fh associated with a struct file
+ *
+ * @filp: pointer to &struct file
+ *
+ * This function should be used by drivers to retrieve the &struct v4l2_fh
+ * instance pointer stored in the file private_data instead of accessing the
+ * private_data field directly.
+ */
+static inline struct v4l2_fh *file_to_v4l2_fh(struct file *filp)
+{
+ return filp->private_data;
+}
+
+/**
* v4l2_fh_init - Initialise the file handle.
*
* @fh: pointer to &struct v4l2_fh
@@ -73,11 +87,14 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev);
* v4l2_fh_add - Add the fh to the list of file handles on a video_device.
*
* @fh: pointer to &struct v4l2_fh
+ * @filp: pointer to &struct file associated with @fh
+ *
+ * The function sets filp->private_data to point to @fh.
*
* .. note::
* The @fh file handle must be initialised first.
*/
-void v4l2_fh_add(struct v4l2_fh *fh);
+void v4l2_fh_add(struct v4l2_fh *fh, struct file *filp);
/**
* v4l2_fh_open - Ancillary routine that can be used as the open\(\) op
@@ -87,6 +104,9 @@ void v4l2_fh_add(struct v4l2_fh *fh);
*
* It allocates a v4l2_fh and inits and adds it to the &struct video_device
* associated with the file pointer.
+ *
+ * On error filp->private_data will be %NULL, otherwise it will point to
+ * the &struct v4l2_fh.
*/
int v4l2_fh_open(struct file *filp);
@@ -94,15 +114,15 @@ int v4l2_fh_open(struct file *filp);
* v4l2_fh_del - Remove file handle from the list of file handles.
*
* @fh: pointer to &struct v4l2_fh
+ * @filp: pointer to &struct file associated with @fh
*
- * On error filp->private_data will be %NULL, otherwise it will point to
- * the &struct v4l2_fh.
+ * The function resets filp->private_data to %NULL.
*
* .. note::
* Must be called in v4l2_file_operations->release\(\) handler if the driver
* uses &struct v4l2_fh.
*/
-void v4l2_fh_del(struct v4l2_fh *fh);
+void v4l2_fh_del(struct v4l2_fh *fh, struct file *filp);
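
Taken together with file_to_v4l2_fh(), the new signatures change the
canonical open()/release() pattern; a minimal hedged sketch (the mydrv_*
names are illustrative):

	static int mydrv_open(struct file *filp)
	{
		struct video_device *vdev = video_devdata(filp);
		struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

		if (!fh)
			return -ENOMEM;

		v4l2_fh_init(fh, vdev);
		/* v4l2_fh_add() now also sets filp->private_data = fh */
		v4l2_fh_add(fh, filp);
		return 0;
	}

	static int mydrv_release(struct file *filp)
	{
		/* preferred over touching filp->private_data directly */
		struct v4l2_fh *fh = file_to_v4l2_fh(filp);

		/* v4l2_fh_del() now also resets filp->private_data */
		v4l2_fh_del(fh, filp);
		v4l2_fh_exit(fh);
		kfree(fh);
		return 0;
	}
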
/**
* v4l2_fh_exit - Release resources related to a file handle.
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 82695c3a300a..6f7a58350441 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -293,144 +293,144 @@ struct v4l2_ioctl_ops {
/* ioctl callbacks */
/* VIDIOC_QUERYCAP handler */
- int (*vidioc_querycap)(struct file *file, void *fh,
+ int (*vidioc_querycap)(struct file *file, void *priv,
struct v4l2_capability *cap);
/* VIDIOC_ENUM_FMT handlers */
- int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
- int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_enum_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
- int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_out_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sliced_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_cap_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_vid_out_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_g_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
- int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_out_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sliced_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_cap_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_vid_out_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_s_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
- int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_overlay)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *fh,
- struct v4l2_format *f);
- int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_out_overlay)(struct file *file, void *priv,
+ struct v4l2_format *f);
+ int (*vidioc_try_fmt_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sliced_vbi_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_cap_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_vid_out_mplane)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sdr_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_sdr_out)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_meta_cap)(struct file *file, void *priv,
struct v4l2_format *f);
- int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh,
+ int (*vidioc_try_fmt_meta_out)(struct file *file, void *priv,
struct v4l2_format *f);
/* Buffer handlers */
- int (*vidioc_reqbufs)(struct file *file, void *fh,
+ int (*vidioc_reqbufs)(struct file *file, void *priv,
struct v4l2_requestbuffers *b);
- int (*vidioc_querybuf)(struct file *file, void *fh,
+ int (*vidioc_querybuf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_qbuf)(struct file *file, void *fh,
+ int (*vidioc_qbuf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_expbuf)(struct file *file, void *fh,
+ int (*vidioc_expbuf)(struct file *file, void *priv,
struct v4l2_exportbuffer *e);
- int (*vidioc_dqbuf)(struct file *file, void *fh,
+ int (*vidioc_dqbuf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_create_bufs)(struct file *file, void *fh,
+ int (*vidioc_create_bufs)(struct file *file, void *priv,
struct v4l2_create_buffers *b);
- int (*vidioc_prepare_buf)(struct file *file, void *fh,
+ int (*vidioc_prepare_buf)(struct file *file, void *priv,
struct v4l2_buffer *b);
- int (*vidioc_remove_bufs)(struct file *file, void *fh,
+ int (*vidioc_remove_bufs)(struct file *file, void *priv,
struct v4l2_remove_buffers *d);
- int (*vidioc_overlay)(struct file *file, void *fh, unsigned int i);
- int (*vidioc_g_fbuf)(struct file *file, void *fh,
+ int (*vidioc_overlay)(struct file *file, void *priv, unsigned int i);
+ int (*vidioc_g_fbuf)(struct file *file, void *priv,
struct v4l2_framebuffer *a);
- int (*vidioc_s_fbuf)(struct file *file, void *fh,
+ int (*vidioc_s_fbuf)(struct file *file, void *priv,
const struct v4l2_framebuffer *a);
/* Stream on/off */
- int (*vidioc_streamon)(struct file *file, void *fh,
+ int (*vidioc_streamon)(struct file *file, void *priv,
enum v4l2_buf_type i);
- int (*vidioc_streamoff)(struct file *file, void *fh,
+ int (*vidioc_streamoff)(struct file *file, void *priv,
enum v4l2_buf_type i);
/*
@@ -438,135 +438,135 @@ struct v4l2_ioctl_ops {
*
* Note: ENUMSTD is handled by videodev.c
*/
- int (*vidioc_g_std)(struct file *file, void *fh, v4l2_std_id *norm);
- int (*vidioc_s_std)(struct file *file, void *fh, v4l2_std_id norm);
- int (*vidioc_querystd)(struct file *file, void *fh, v4l2_std_id *a);
+ int (*vidioc_g_std)(struct file *file, void *priv, v4l2_std_id *norm);
+ int (*vidioc_s_std)(struct file *file, void *priv, v4l2_std_id norm);
+ int (*vidioc_querystd)(struct file *file, void *priv, v4l2_std_id *a);
/* Input handling */
- int (*vidioc_enum_input)(struct file *file, void *fh,
+ int (*vidioc_enum_input)(struct file *file, void *priv,
struct v4l2_input *inp);
- int (*vidioc_g_input)(struct file *file, void *fh, unsigned int *i);
- int (*vidioc_s_input)(struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_input)(struct file *file, void *priv, unsigned int *i);
+ int (*vidioc_s_input)(struct file *file, void *priv, unsigned int i);
/* Output handling */
- int (*vidioc_enum_output)(struct file *file, void *fh,
+ int (*vidioc_enum_output)(struct file *file, void *priv,
struct v4l2_output *a);
- int (*vidioc_g_output)(struct file *file, void *fh, unsigned int *i);
- int (*vidioc_s_output)(struct file *file, void *fh, unsigned int i);
+ int (*vidioc_g_output)(struct file *file, void *priv, unsigned int *i);
+ int (*vidioc_s_output)(struct file *file, void *priv, unsigned int i);
/* Control handling */
- int (*vidioc_query_ext_ctrl)(struct file *file, void *fh,
+ int (*vidioc_query_ext_ctrl)(struct file *file, void *priv,
struct v4l2_query_ext_ctrl *a);
- int (*vidioc_g_ext_ctrls)(struct file *file, void *fh,
+ int (*vidioc_g_ext_ctrls)(struct file *file, void *priv,
struct v4l2_ext_controls *a);
- int (*vidioc_s_ext_ctrls)(struct file *file, void *fh,
+ int (*vidioc_s_ext_ctrls)(struct file *file, void *priv,
struct v4l2_ext_controls *a);
- int (*vidioc_try_ext_ctrls)(struct file *file, void *fh,
+ int (*vidioc_try_ext_ctrls)(struct file *file, void *priv,
struct v4l2_ext_controls *a);
- int (*vidioc_querymenu)(struct file *file, void *fh,
+ int (*vidioc_querymenu)(struct file *file, void *priv,
struct v4l2_querymenu *a);
/* Audio ioctls */
- int (*vidioc_enumaudio)(struct file *file, void *fh,
+ int (*vidioc_enumaudio)(struct file *file, void *priv,
struct v4l2_audio *a);
- int (*vidioc_g_audio)(struct file *file, void *fh,
+ int (*vidioc_g_audio)(struct file *file, void *priv,
struct v4l2_audio *a);
- int (*vidioc_s_audio)(struct file *file, void *fh,
+ int (*vidioc_s_audio)(struct file *file, void *priv,
const struct v4l2_audio *a);
/* Audio out ioctls */
- int (*vidioc_enumaudout)(struct file *file, void *fh,
+ int (*vidioc_enumaudout)(struct file *file, void *priv,
struct v4l2_audioout *a);
- int (*vidioc_g_audout)(struct file *file, void *fh,
+ int (*vidioc_g_audout)(struct file *file, void *priv,
struct v4l2_audioout *a);
- int (*vidioc_s_audout)(struct file *file, void *fh,
+ int (*vidioc_s_audout)(struct file *file, void *priv,
const struct v4l2_audioout *a);
- int (*vidioc_g_modulator)(struct file *file, void *fh,
+ int (*vidioc_g_modulator)(struct file *file, void *priv,
struct v4l2_modulator *a);
- int (*vidioc_s_modulator)(struct file *file, void *fh,
+ int (*vidioc_s_modulator)(struct file *file, void *priv,
const struct v4l2_modulator *a);
/* Crop ioctls */
- int (*vidioc_g_pixelaspect)(struct file *file, void *fh,
+ int (*vidioc_g_pixelaspect)(struct file *file, void *priv,
int buf_type, struct v4l2_fract *aspect);
- int (*vidioc_g_selection)(struct file *file, void *fh,
+ int (*vidioc_g_selection)(struct file *file, void *priv,
struct v4l2_selection *s);
- int (*vidioc_s_selection)(struct file *file, void *fh,
+ int (*vidioc_s_selection)(struct file *file, void *priv,
struct v4l2_selection *s);
/* Compression ioctls */
- int (*vidioc_g_jpegcomp)(struct file *file, void *fh,
+ int (*vidioc_g_jpegcomp)(struct file *file, void *priv,
struct v4l2_jpegcompression *a);
- int (*vidioc_s_jpegcomp)(struct file *file, void *fh,
+ int (*vidioc_s_jpegcomp)(struct file *file, void *priv,
const struct v4l2_jpegcompression *a);
- int (*vidioc_g_enc_index)(struct file *file, void *fh,
+ int (*vidioc_g_enc_index)(struct file *file, void *priv,
struct v4l2_enc_idx *a);
- int (*vidioc_encoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_encoder_cmd)(struct file *file, void *priv,
struct v4l2_encoder_cmd *a);
- int (*vidioc_try_encoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_try_encoder_cmd)(struct file *file, void *priv,
struct v4l2_encoder_cmd *a);
- int (*vidioc_decoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_decoder_cmd)(struct file *file, void *priv,
struct v4l2_decoder_cmd *a);
- int (*vidioc_try_decoder_cmd)(struct file *file, void *fh,
+ int (*vidioc_try_decoder_cmd)(struct file *file, void *priv,
struct v4l2_decoder_cmd *a);
/* Stream type-dependent parameter ioctls */
- int (*vidioc_g_parm)(struct file *file, void *fh,
+ int (*vidioc_g_parm)(struct file *file, void *priv,
struct v4l2_streamparm *a);
- int (*vidioc_s_parm)(struct file *file, void *fh,
+ int (*vidioc_s_parm)(struct file *file, void *priv,
struct v4l2_streamparm *a);
/* Tuner ioctls */
- int (*vidioc_g_tuner)(struct file *file, void *fh,
+ int (*vidioc_g_tuner)(struct file *file, void *priv,
struct v4l2_tuner *a);
- int (*vidioc_s_tuner)(struct file *file, void *fh,
+ int (*vidioc_s_tuner)(struct file *file, void *priv,
const struct v4l2_tuner *a);
- int (*vidioc_g_frequency)(struct file *file, void *fh,
+ int (*vidioc_g_frequency)(struct file *file, void *priv,
struct v4l2_frequency *a);
- int (*vidioc_s_frequency)(struct file *file, void *fh,
+ int (*vidioc_s_frequency)(struct file *file, void *priv,
const struct v4l2_frequency *a);
- int (*vidioc_enum_freq_bands)(struct file *file, void *fh,
+ int (*vidioc_enum_freq_bands)(struct file *file, void *priv,
struct v4l2_frequency_band *band);
/* Sliced VBI cap */
- int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *fh,
+ int (*vidioc_g_sliced_vbi_cap)(struct file *file, void *priv,
struct v4l2_sliced_vbi_cap *a);
/* Log status ioctl */
- int (*vidioc_log_status)(struct file *file, void *fh);
+ int (*vidioc_log_status)(struct file *file, void *priv);
- int (*vidioc_s_hw_freq_seek)(struct file *file, void *fh,
+ int (*vidioc_s_hw_freq_seek)(struct file *file, void *priv,
const struct v4l2_hw_freq_seek *a);
/* Debugging ioctls */
#ifdef CONFIG_VIDEO_ADV_DEBUG
- int (*vidioc_g_register)(struct file *file, void *fh,
+ int (*vidioc_g_register)(struct file *file, void *priv,
struct v4l2_dbg_register *reg);
- int (*vidioc_s_register)(struct file *file, void *fh,
+ int (*vidioc_s_register)(struct file *file, void *priv,
const struct v4l2_dbg_register *reg);
- int (*vidioc_g_chip_info)(struct file *file, void *fh,
+ int (*vidioc_g_chip_info)(struct file *file, void *priv,
struct v4l2_dbg_chip_info *chip);
#endif
- int (*vidioc_enum_framesizes)(struct file *file, void *fh,
+ int (*vidioc_enum_framesizes)(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize);
- int (*vidioc_enum_frameintervals)(struct file *file, void *fh,
+ int (*vidioc_enum_frameintervals)(struct file *file, void *priv,
struct v4l2_frmivalenum *fival);
/* DV Timings IOCTLs */
- int (*vidioc_s_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_s_dv_timings)(struct file *file, void *priv,
struct v4l2_dv_timings *timings);
- int (*vidioc_g_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_g_dv_timings)(struct file *file, void *priv,
struct v4l2_dv_timings *timings);
- int (*vidioc_query_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_query_dv_timings)(struct file *file, void *priv,
struct v4l2_dv_timings *timings);
- int (*vidioc_enum_dv_timings)(struct file *file, void *fh,
+ int (*vidioc_enum_dv_timings)(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings);
- int (*vidioc_dv_timings_cap)(struct file *file, void *fh,
+ int (*vidioc_dv_timings_cap)(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap);
- int (*vidioc_g_edid)(struct file *file, void *fh,
+ int (*vidioc_g_edid)(struct file *file, void *priv,
struct v4l2_edid *edid);
- int (*vidioc_s_edid)(struct file *file, void *fh,
+ int (*vidioc_s_edid)(struct file *file, void *priv,
struct v4l2_edid *edid);
int (*vidioc_subscribe_event)(struct v4l2_fh *fh,
@@ -575,7 +575,7 @@ struct v4l2_ioctl_ops {
const struct v4l2_event_subscription *sub);
/* For other private ioctls */
- long (*vidioc_default)(struct file *file, void *fh,
+ long (*vidioc_default)(struct file *file, void *priv,
bool valid_prio, unsigned int cmd, void *arg);
};
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0af330cf91c3..09c6164577cc 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -864,34 +864,34 @@ void v4l2_m2m_request_queue(struct media_request *req);
/* v4l2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *rb);
-int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
- struct v4l2_create_buffers *create);
+ struct v4l2_requestbuffers *rb);
+int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
struct v4l2_remove_buffers *d);
-int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
- struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
- struct v4l2_exportbuffer *eb);
-int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
- struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
+int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
+ struct v4l2_exportbuffer *eb);
+int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
+ struct v4l2_buffer *buf);
+int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *buf);
-int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
- enum v4l2_buf_type type);
-int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
- enum v4l2_buf_type type);
-int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec);
-int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
-int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec);
-int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
-int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *dc);
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5dcf4065708f..e0bb58cb6d04 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -2,7 +2,7 @@
/*
* V4L2 sub-device support header.
*
- * Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_SUBDEV_H
@@ -36,6 +36,7 @@ struct v4l2_event_subscription;
struct v4l2_fh;
struct v4l2_subdev;
struct v4l2_subdev_fh;
+struct v4l2_subdev_stream_config;
struct tuner_setup;
struct v4l2_mbus_frame_desc;
struct led_classdev;
@@ -684,30 +685,6 @@ struct v4l2_subdev_pad_config {
};
/**
- * struct v4l2_subdev_stream_config - Used for storing stream configuration.
- *
- * @pad: pad number
- * @stream: stream number
- * @enabled: has the stream been enabled with v4l2_subdev_enable_streams()
- * @fmt: &struct v4l2_mbus_framefmt
- * @crop: &struct v4l2_rect to be used for crop
- * @compose: &struct v4l2_rect to be used for compose
- * @interval: frame interval
- *
- * This structure stores configuration for a stream.
- */
-struct v4l2_subdev_stream_config {
- u32 pad;
- u32 stream;
- bool enabled;
-
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_rect crop;
- struct v4l2_rect compose;
- struct v4l2_fract interval;
-};
-
-/**
* struct v4l2_subdev_stream_configs - A collection of stream configs.
*
* @num_configs: number of entries in @config.
@@ -1962,19 +1939,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
*
* Note: only legacy non-MC drivers may need this macro.
*/
-#define v4l2_subdev_call_state_try(sd, o, f, args...) \
- ({ \
- int __result; \
- static struct lock_class_key __key; \
- const char *name = KBUILD_BASENAME \
- ":" __stringify(__LINE__) ":state->lock"; \
- struct v4l2_subdev_state *state = \
- __v4l2_subdev_state_alloc(sd, name, &__key); \
- v4l2_subdev_lock_state(state); \
- __result = v4l2_subdev_call(sd, o, f, state, ##args); \
- v4l2_subdev_unlock_state(state); \
- __v4l2_subdev_state_free(state); \
- __result; \
+#define v4l2_subdev_call_state_try(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":state->lock"; \
+ struct v4l2_subdev_state *state = \
+ __v4l2_subdev_state_alloc(sd, name, &__key); \
+ if (IS_ERR(state)) { \
+ __result = PTR_ERR(state); \
+ } else { \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ v4l2_subdev_unlock_state(state); \
+ __v4l2_subdev_state_free(state); \
+ } \
+ __result; \
})
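
With the IS_ERR() check in place the macro propagates a state allocation
failure instead of dereferencing an error pointer; a hedged usage sketch
(sd and the pad op chosen are illustrative):

	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	int ret;

	ret = v4l2_subdev_call_state_try(sd, pad, get_fmt, &fmt);
	if (ret)
		return ret;	/* now also covers e.g. -ENOMEM from the alloc */
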
/**
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 2894cfff2da3..91a24b5e0b93 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -33,7 +33,10 @@ struct tc_action {
struct tcf_t tcfa_tm;
struct gnet_stats_basic_sync tcfa_bstats;
struct gnet_stats_basic_sync tcfa_bstats_hw;
- struct gnet_stats_queue tcfa_qstats;
+
+ atomic_t tcfa_drops;
+ atomic_t tcfa_overlimits;
+
struct net_rate_estimator __rcu *tcfa_rate_est;
spinlock_t tcfa_lock;
struct gnet_stats_basic_sync __percpu *cpu_bstats;
@@ -53,7 +56,6 @@ struct tc_action {
#define tcf_action common.tcfa_action
#define tcf_tm common.tcfa_tm
#define tcf_bstats common.tcfa_bstats
-#define tcf_qstats common.tcfa_qstats
#define tcf_rate_est common.tcfa_rate_est
#define tcf_lock common.tcfa_lock
@@ -241,9 +243,7 @@ static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
return;
}
- spin_lock(&a->tcfa_lock);
- qstats_drop_inc(&a->tcfa_qstats);
- spin_unlock(&a->tcfa_lock);
+ atomic_inc(&a->tcfa_drops);
}
static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
@@ -252,9 +252,7 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
return;
}
- spin_lock(&a->tcfa_lock);
- qstats_overlimit_inc(&a->tcfa_qstats);
- spin_unlock(&a->tcfa_lock);
+ atomic_inc(&a->tcfa_overlimits);
}
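
With the counters now plain atomics, readers no longer need tcfa_lock; a
hedged sketch of how a stats dump might fold them back into a struct
gnet_stats_queue (helper name illustrative):

	static void mydrv_fill_qstats(const struct tc_action *a,
				      struct gnet_stats_queue *qs)
	{
		qs->drops = atomic_read(&a->tcfa_drops);
		qs->overlimits = atomic_read(&a->tcfa_overlimits);
	}
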
void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index e5751f3070b8..d46ed9011ee5 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -272,7 +272,8 @@ void bt_err_ratelimited(const char *fmt, ...);
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG)
-#define BT_DBG(fmt, ...) bt_dbg(fmt "\n", ##__VA_ARGS__)
+#define BT_DBG(fmt, ...) \
+ bt_dbg("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
#else
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
#endif
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index df1847b74e55..9ecc70baaca9 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -488,6 +488,7 @@ enum {
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
#define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
+#define HCI_ISO_TX_TIMEOUT usecs_to_jiffies(0x7fffff) /* 8388607 usecs */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 6560b32f3125..2924c2bf2a98 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -487,6 +487,7 @@ struct hci_dev {
unsigned long acl_last_tx;
unsigned long le_last_tx;
+ unsigned long iso_last_tx;
__u8 le_tx_def_phys;
__u8 le_rx_def_phys;
@@ -1587,16 +1588,18 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u16 setting, struct bt_codec *codec,
u16 timeout);
struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos);
+ __u8 dst_type, struct bt_iso_qos *qos,
+ u16 timeout);
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 sid,
struct bt_iso_qos *qos,
- __u8 base_len, __u8 *base);
+ __u8 base_len, __u8 *base, u16 timeout);
struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
- __u8 dst_type, struct bt_iso_qos *qos);
+ __u8 dst_type, struct bt_iso_qos *qos,
+ u16 timeout);
struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid,
struct bt_iso_qos *qos,
- __u8 data_len, __u8 *data);
+ __u8 data_len, __u8 *data, u16 timeout);
struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
__u8 dst_type, __u8 sid, struct bt_iso_qos *qos);
int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
diff --git a/include/net/bluetooth/hci_drv.h b/include/net/bluetooth/hci_drv.h
index 2f01c44f05ec..3fd6fdbdb02e 100644
--- a/include/net/bluetooth/hci_drv.h
+++ b/include/net/bluetooth/hci_drv.h
@@ -47,7 +47,7 @@ struct hci_drv_ev_cmd_complete {
struct hci_drv_rp_read_info {
__u8 driver_name[HCI_DRV_MAX_DRIVER_NAME_LENGTH];
__le16 num_supported_commands;
- __le16 supported_commands[];
+ __le16 supported_commands[] __counted_by_le(num_supported_commands);
} __packed;
/* Driver specific OGF (Opcode Group Field)
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 3575cd16049a..74edea06985b 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -53,10 +53,15 @@ struct mgmt_hdr {
} __packed;
struct mgmt_tlv {
- __le16 type;
- __u8 length;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(mgmt_tlv_hdr, __hdr, __packed,
+ __le16 type;
+ __u8 length;
+ );
__u8 value[];
} __packed;
+static_assert(offsetof(struct mgmt_tlv, value) == sizeof(struct mgmt_tlv_hdr),
+ "struct member likely outside of __struct_group()");
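
The __struct_group() split gives parsers a sized header type; a hedged
sketch of the kind of bounds check this enables (function name
illustrative):

	static bool mgmt_tlv_fits(const void *buf, size_t len)
	{
		const struct mgmt_tlv_hdr *hdr = buf;

		if (len < sizeof(*hdr))
			return false;
		return len >= sizeof(*hdr) + hdr->length;
	}
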
struct mgmt_addr_info {
bdaddr_t bdaddr;
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index dba369a2cf27..c92d4a976246 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -26,6 +26,7 @@ enum {
BOND_AD_STABLE = 0,
BOND_AD_BANDWIDTH = 1,
BOND_AD_COUNT = 2,
+ BOND_AD_PRIO = 3,
};
/* rx machine states(43.4.11 in the 802.3ad standard) */
@@ -274,6 +275,7 @@ struct ad_slave_info {
struct port port; /* 802.3ad port structure */
struct bond_3ad_stats stats;
u16 id;
+ u16 port_priority;
};
static inline const char *bond_3ad_churn_desc(churn_state_t state)
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index 022b122a9fb6..e6eedf23aea1 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -78,6 +78,7 @@ enum {
BOND_OPT_PRIO,
BOND_OPT_COUPLED_CONTROL,
BOND_OPT_BROADCAST_NEIGH,
+ BOND_OPT_ACTOR_PORT_PRIO,
BOND_OPT_LAST
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index e06f0d63b2c1..49edc7da0586 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -126,7 +126,6 @@ struct bond_params {
int arp_interval;
int arp_validate;
int arp_all_targets;
- int use_carrier;
int fail_over_mac;
int updelay;
int downdelay;
@@ -711,6 +710,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
void bond_work_init_all(struct bonding *bond);
+void bond_work_cancel_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 406626ff6cc8..781624f5913a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -101,16 +101,6 @@ struct wiphy;
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
- * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
- * on this channel.
- * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
- * on this channel.
* @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band,
* this flag indicates that a 320 MHz channel cannot use this
* channel as the control or any of the secondary channels.
@@ -129,6 +119,13 @@ struct wiphy;
* with very low power (VLP), even if otherwise set to NO_IR.
* @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel,
* even if otherwise set to NO_IR.
+ * @IEEE80211_CHAN_S1G_NO_PRIMARY: Prevents the channel from being used as an
+ * S1G primary channel. Does not prevent the wider operating channel
+ * described by the chandef from being used. In order for a 2MHz primary
+ * to be used, neither of its 1MHz subchannels may carry this flag.
+ * @IEEE80211_CHAN_NO_4MHZ: 4 MHz bandwidth is not permitted on this channel.
+ * @IEEE80211_CHAN_NO_8MHZ: 8 MHz bandwidth is not permitted on this channel.
+ * @IEEE80211_CHAN_NO_16MHZ: 16 MHz bandwidth is not permitted on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = BIT(0),
@@ -145,11 +142,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_20MHZ = BIT(11),
IEEE80211_CHAN_NO_10MHZ = BIT(12),
IEEE80211_CHAN_NO_HE = BIT(13),
- IEEE80211_CHAN_1MHZ = BIT(14),
- IEEE80211_CHAN_2MHZ = BIT(15),
- IEEE80211_CHAN_4MHZ = BIT(16),
- IEEE80211_CHAN_8MHZ = BIT(17),
- IEEE80211_CHAN_16MHZ = BIT(18),
+ /* can use free bits here */
IEEE80211_CHAN_NO_320MHZ = BIT(19),
IEEE80211_CHAN_NO_EHT = BIT(20),
IEEE80211_CHAN_DFS_CONCURRENT = BIT(21),
@@ -158,6 +151,10 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_CAN_MONITOR = BIT(24),
IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25),
IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26),
+ IEEE80211_CHAN_S1G_NO_PRIMARY = BIT(27),
+ IEEE80211_CHAN_NO_4MHZ = BIT(28),
+ IEEE80211_CHAN_NO_8MHZ = BIT(29),
+ IEEE80211_CHAN_NO_16MHZ = BIT(30),
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -821,6 +818,9 @@ struct key_params {
* @punctured: mask of the punctured 20 MHz subchannels, with
* bits turned on being disabled (punctured); numbered
* from lower to higher frequency (like in the spec)
+ * @s1g_primary_2mhz: Indicates whether the control channel pointed to
+ * by 'chan' exists as a 1MHz primary subchannel within an
+ * S1G 2MHz primary channel.
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
@@ -830,6 +830,7 @@ struct cfg80211_chan_def {
struct ieee80211_edmg edmg;
u16 freq1_offset;
u16 punctured;
+ bool s1g_primary_2mhz;
};
/*
@@ -841,9 +842,12 @@ struct cfg80211_bitrate_mask {
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
u16 he_mcs[NL80211_HE_NSS_MAX];
+ u16 eht_mcs[NL80211_EHT_NSS_MAX];
enum nl80211_txrate_gi gi;
enum nl80211_he_gi he_gi;
+ enum nl80211_eht_gi eht_gi;
enum nl80211_he_ltf he_ltf;
+ enum nl80211_eht_ltf eht_ltf;
} control[NUM_NL80211_BANDS];
};
@@ -988,6 +992,18 @@ cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
}
/**
+ * cfg80211_chandef_is_s1g - check if chandef represents an S1G channel
+ * @chandef: the channel definition
+ *
+ * Return: %true if S1G.
+ */
+static inline bool
+cfg80211_chandef_is_s1g(const struct cfg80211_chan_def *chandef)
+{
+ return chandef->chan->band == NL80211_BAND_S1GHZ;
+}
+
+/**
* cfg80211_chandef_compatible - check if two channel definitions are compatible
* @chandef1: first channel definition
* @chandef2: second channel definition
@@ -2457,6 +2473,29 @@ struct mpath_info {
};
/**
+ * enum wiphy_bss_param_flags - bit positions for supported bss parameters.
+ *
+ * @WIPHY_BSS_PARAM_CTS_PROT: support changing CTS protection.
+ * @WIPHY_BSS_PARAM_SHORT_PREAMBLE: support changing short preamble usage.
+ * @WIPHY_BSS_PARAM_SHORT_SLOT_TIME: support changing short slot time usage.
+ * @WIPHY_BSS_PARAM_BASIC_RATES: support reconfiguring basic rates.
+ * @WIPHY_BSS_PARAM_AP_ISOLATE: support changing AP isolation.
+ * @WIPHY_BSS_PARAM_HT_OPMODE: support changing HT operating mode.
+ * @WIPHY_BSS_PARAM_P2P_CTWINDOW: support reconfiguring ctwindow.
+ * @WIPHY_BSS_PARAM_P2P_OPPPS: support changing P2P opportunistic power-save.
+ */
+enum wiphy_bss_param_flags {
+ WIPHY_BSS_PARAM_CTS_PROT = BIT(0),
+ WIPHY_BSS_PARAM_SHORT_PREAMBLE = BIT(1),
+ WIPHY_BSS_PARAM_SHORT_SLOT_TIME = BIT(2),
+ WIPHY_BSS_PARAM_BASIC_RATES = BIT(3),
+ WIPHY_BSS_PARAM_AP_ISOLATE = BIT(4),
+ WIPHY_BSS_PARAM_HT_OPMODE = BIT(5),
+ WIPHY_BSS_PARAM_P2P_CTWINDOW = BIT(6),
+ WIPHY_BSS_PARAM_P2P_OPPPS = BIT(7),
+};
+
+/**
* struct bss_parameters - BSS parameters
*
* Used to change BSS parameters (mainly for AP mode).
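
A hedged sketch of a driver advertising, at wiphy registration time, which
of these parameters its .change_bss() actually honours (the particular set
is illustrative):

	wiphy->bss_param_support = WIPHY_BSS_PARAM_CTS_PROT |
				   WIPHY_BSS_PARAM_SHORT_PREAMBLE |
				   WIPHY_BSS_PARAM_SHORT_SLOT_TIME |
				   WIPHY_BSS_PARAM_BASIC_RATES;
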
@@ -3887,6 +3926,38 @@ struct cfg80211_qos_map {
};
/**
+ * struct cfg80211_nan_band_config - NAN band specific configuration
+ *
+ * @chan: Pointer to the IEEE 802.11 channel structure. The channel to be used
+ * for NAN operations on this band. For the 2.4 GHz band, this is always
+ * channel 6. For the 5 GHz band, the channel is either 44 or 149, according
+ * to the regulatory constraints. If the chan pointer is %NULL, the entire
+ * band configuration entry is considered invalid and should not be used.
+ * @rssi_close: RSSI close threshold used for the NAN state transition
+ * algorithm, as described in chapters 3.3.6 and 3.3.7 "NAN Device Role
+ * and State Transition" of Wi-Fi Aware Specification v4.0. If not
+ * specified (set to 0), the device default value is used. The value
+ * should be greater than -60 dBm.
+ * @rssi_middle: RSSI middle threshold used for the NAN state transition
+ * algorithm, as described in chapters 3.3.6 and 3.3.7 "NAN Device Role
+ * and State Transition" of Wi-Fi Aware Specification v4.0. If not
+ * specified (set to 0), the device default value is used. The value
+ * should be greater than -75 dBm and less than rssi_close.
+ * @awake_dw_interval: Committed DW interval. Valid values range: 0-5, where
+ * 0 indicates no DW wakeup (not allowed on the 2.4GHz band) and a value
+ * n selects an awake interval of 2^(n-1) DWs.
+ * @disable_scan: If true, the device will not scan this band for cluster
+ * merge. Disabling scan on the 2.4 GHz band is not allowed.
+ */
+struct cfg80211_nan_band_config {
+ struct ieee80211_channel *chan;
+ s8 rssi_close;
+ s8 rssi_middle;
+ u8 awake_dw_interval;
+ bool disable_scan;
+};
+
+/**
* struct cfg80211_nan_conf - NAN configuration
*
* This struct defines NAN configuration parameters
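
A hedged sketch of filling one band entry within the constraints documented
above (conf and the threshold values are illustrative):

	struct cfg80211_nan_band_config *cfg =
		&conf->band_cfgs[NL80211_BAND_2GHZ];

	cfg->chan = ieee80211_get_channel(wiphy, 2437);	/* channel 6 */
	cfg->rssi_close = -55;		/* must be > -60 dBm */
	cfg->rssi_middle = -70;		/* > -75 dBm and < rssi_close */
	cfg->awake_dw_interval = 1;	/* wake every DW */
	cfg->disable_scan = false;	/* 2.4 GHz scan can't be disabled */
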
@@ -3895,10 +3966,34 @@ struct cfg80211_qos_map {
* @bands: operating bands, a bitmap of &enum nl80211_band values.
* For instance, for NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
+ * @cluster_id: cluster ID used for NAN synchronization. This is a MAC address
+ * that can take a value from 50-6F-9A-01-00-00 to 50-6F-9A-01-FF-FF.
+ * If %NULL, the device will pick a random Cluster ID.
+ * @scan_period: period (in seconds) between NAN scans.
+ * @scan_dwell_time: dwell time (in milliseconds) for NAN scans.
+ * @discovery_beacon_interval: interval (in TUs) for discovery beacons.
+ * @enable_dw_notification: flag to enable/disable discovery window
+ * notifications.
+ * @band_cfgs: array of band specific configurations, indexed by
+ * &enum nl80211_band values.
+ * @extra_nan_attrs: pointer to additional NAN attributes.
+ * @extra_nan_attrs_len: length of the additional NAN attributes.
+ * @vendor_elems: pointer to vendor-specific elements.
+ * @vendor_elems_len: length of the vendor-specific elements.
*/
struct cfg80211_nan_conf {
u8 master_pref;
u8 bands;
+ const u8 *cluster_id;
+ u16 scan_period;
+ u16 scan_dwell_time;
+ u8 discovery_beacon_interval;
+ bool enable_dw_notification;
+ struct cfg80211_nan_band_config band_cfgs[NUM_NL80211_BANDS];
+ const u8 *extra_nan_attrs;
+ u16 extra_nan_attrs_len;
+ const u8 *vendor_elems;
+ u16 vendor_elems_len;
};
/**
@@ -3907,10 +4002,17 @@ struct cfg80211_nan_conf {
*
* @CFG80211_NAN_CONF_CHANGED_PREF: master preference
* @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands
+ * @CFG80211_NAN_CONF_CHANGED_CONFIG: changed additional configuration.
+ * When this flag is set, it indicates that some additional attribute(s)
+ * (other than master_pref and bands) have been changed. In this case,
+ * all the unchanged attributes will be properly configured to their
+ * previous values. The driver doesn't need to store any
+ * previous configuration besides master_pref and bands.
*/
enum cfg80211_nan_conf_changes {
CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1),
+ CFG80211_NAN_CONF_CHANGED_CONFIG = BIT(2),
};
/**
@@ -5622,6 +5724,42 @@ struct wiphy_radio {
u32 antenna_mask;
};
+/**
+ * enum wiphy_nan_flags - NAN capabilities
+ *
+ * @WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC: Device supports NAN configurable
+ * synchronization.
+ * @WIPHY_NAN_FLAGS_USERSPACE_DE: Device doesn't support DE offload.
+ */
+enum wiphy_nan_flags {
+ WIPHY_NAN_FLAGS_CONFIGURABLE_SYNC = BIT(0),
+ WIPHY_NAN_FLAGS_USERSPACE_DE = BIT(1),
+};
+
+/**
+ * struct wiphy_nan_capa - NAN capabilities
+ *
+ * This structure describes the NAN capabilities of a wiphy.
+ *
+ * @flags: NAN capabilities flags, see &enum wiphy_nan_flags
+ * @op_mode: NAN operation mode, as defined in Wi-Fi Aware (TM) specification
+ * Table 81.
+ * @n_antennas: number of antennas supported by the device for Tx/Rx. Lower
+ * nibble indicates the number of TX antennas and upper nibble indicates the
+ * number of RX antennas. Value 0 indicates the information is not
+ * available.
+ * @max_channel_switch_time: maximum channel switch time in milliseconds.
+ * @dev_capabilities: NAN device capabilities as defined in Wi-Fi Aware (TM)
+ * specification Table 79 (Capabilities field).
+ */
+struct wiphy_nan_capa {
+ u32 flags;
+ u8 op_mode;
+ u8 n_antennas;
+ u16 max_channel_switch_time;
+ u8 dev_capabilities;
+};
+
#define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff
/**
@@ -5782,6 +5920,11 @@ struct wiphy_radio {
* and probe responses. This value should be set if the driver
* wishes to limit the number of csa counters. Default (0) means
* infinite.
+ * @bss_param_support: bitmask indicating which BSS parameters, as defined in
+ * &struct bss_parameters, the driver can actually handle in the
+ * .change_bss() callback. The bit positions are defined in &enum
+ * wiphy_bss_param_flags.
+ *
* @bss_select_support: bitmask indicating the BSS selection criteria supported
* by the driver in the .connect() callback. The bit position maps to the
* attribute indices defined in &enum nl80211_bss_select_attr.
@@ -5790,6 +5933,7 @@ struct wiphy_radio {
* bitmap of &enum nl80211_band values. For instance, for
* NL80211_BAND_2GHZ, bit 0 would be set
* (i.e. BIT(NL80211_BAND_2GHZ)).
+ * @nan_capa: NAN capabilities
*
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
@@ -5967,9 +6111,11 @@ struct wiphy {
u8 max_num_csa_counters;
+ u32 bss_param_support;
u32 bss_select_support;
u8 nan_supported_bands;
+ struct wiphy_nan_capa nan_capa;
u32 txq_limit;
u32 txq_memory_limit;
@@ -6548,6 +6694,9 @@ struct wireless_dev {
struct {
struct cfg80211_chan_def chandef;
} ocb;
+ struct {
+ u8 cluster_id[ETH_ALEN] __aligned(2);
+ } nan;
} u;
struct {
@@ -6656,16 +6805,6 @@ ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
}
/**
- * ieee80211_s1g_channel_width - get allowed channel width from @chan
- *
- * Only allowed for band NL80211_BAND_S1GHZ
- * @chan: channel
- * Return: The allowed channel width for this center_freq
- */
-enum nl80211_chan_width
-ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
-
-/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
@@ -9548,7 +9687,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
* @wiphy: the wiphy
* @chan: channel for which the supported radio index is required
*
- * Return: radio index on success or a negative error code
+ * Return: radio index on success or -EINVAL otherwise
*/
int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
const struct ieee80211_channel *chan);
@@ -9970,6 +10109,29 @@ void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
*/
void cfg80211_epcs_changed(struct net_device *netdev, bool enabled);
+/**
+ * cfg80211_next_nan_dw_notif - Notify about the next NAN Discovery Window (DW)
+ * @wdev: Pointer to the wireless device structure
+ * @chan: DW channel (6, 44 or 149)
+ * @gfp: Memory allocation flags
+ */
+void cfg80211_next_nan_dw_notif(struct wireless_dev *wdev,
+ struct ieee80211_channel *chan, gfp_t gfp);
+
+/**
+ * cfg80211_nan_cluster_joined - Notify about NAN cluster join
+ * @wdev: Pointer to the wireless device structure
+ * @cluster_id: Cluster ID of the NAN cluster that was joined or started
+ * @new_cluster: Indicates if this is a new cluster or an existing one
+ * @gfp: Memory allocation flags
+ *
+ * This function is used to notify user space when a NAN cluster has been
+ * joined, providing the cluster ID and whether it is a new cluster.
+ */
+void cfg80211_nan_cluster_joined(struct wireless_dev *wdev,
+ const u8 *cluster_id, bool new_cluster,
+ gfp_t gfp);
+
#ifdef CONFIG_CFG80211_DEBUGFS
/**
* wiphy_locked_debugfs_read - do a locked read in debugfs
@@ -10020,4 +10182,72 @@ ssize_t wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
void *data);
#endif
+/**
+ * cfg80211_s1g_get_start_freq_khz - get S1G chandef start frequency
+ * @chandef: the chandef to use
+ *
+ * Return: the chandef's starting frequency in kHz
+ */
+static inline u32
+cfg80211_s1g_get_start_freq_khz(const struct cfg80211_chan_def *chandef)
+{
+ u32 bw_mhz = cfg80211_chandef_get_width(chandef);
+ u32 center_khz =
+ MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
+ return center_khz - bw_mhz * 500 + 500;
+}
+
+/**
+ * cfg80211_s1g_get_end_freq_khz - get S1G chandef end frequency
+ * @chandef: the chandef to use
+ *
+ * Return: the chandef's ending frequency in kHz
+ */
+static inline u32
+cfg80211_s1g_get_end_freq_khz(const struct cfg80211_chan_def *chandef)
+{
+ u32 bw_mhz = cfg80211_chandef_get_width(chandef);
+ u32 center_khz =
+ MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset;
+ return center_khz + bw_mhz * 500 - 500;
+}
+
+/**
+ * cfg80211_s1g_get_primary_sibling - retrieve the sibling 1MHz subchannel
+ * for an S1G chandef using a 2MHz primary channel.
+ * @wiphy: wiphy the channel belongs to
+ * @chandef: the chandef to use
+ *
+ * When chandef::s1g_primary_2mhz is set to true, we are operating on a 2MHz
+ * primary channel. The 1MHz subchannel designated by the primary channel
+ * location exists within chandef::chan, whilst the 'sibling' is the other
+ * 1MHz subchannel that makes up the 2MHz primary channel.
+ *
+ * Return: the sibling 1MHz &struct ieee80211_channel, or %NULL on failure.
+ */
+static inline struct ieee80211_channel *
+cfg80211_s1g_get_primary_sibling(struct wiphy *wiphy,
+ const struct cfg80211_chan_def *chandef)
+{
+ int width_mhz = cfg80211_chandef_get_width(chandef);
+ u32 pri_1mhz_khz, sibling_1mhz_khz, op_low_1mhz_khz, pri_index;
+
+ if (!chandef->s1g_primary_2mhz || width_mhz < 2)
+ return NULL;
+
+ pri_1mhz_khz = ieee80211_channel_to_khz(chandef->chan);
+ op_low_1mhz_khz = cfg80211_s1g_get_start_freq_khz(chandef);
+
+ /*
+ * Compute the index of the primary 1 MHz subchannel within the
+ * operating channel, relative to the lowest 1 MHz center frequency.
+ * Flip the least significant bit to select the even/odd sibling,
+ * then translate that index back into a channel frequency.
+ */
+ pri_index = (pri_1mhz_khz - op_low_1mhz_khz) / 1000;
+ sibling_1mhz_khz = op_low_1mhz_khz + ((pri_index ^ 1) * 1000);
+
+ return ieee80211_get_channel_khz(wiphy, sibling_1mhz_khz);
+}
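
Worked example (illustrative numbers): for a 2MHz chandef centered at
903.0 MHz, cfg80211_s1g_get_start_freq_khz() yields 903000 - 2 * 500 + 500 =
902500 kHz, the center of the lowest 1MHz subchannel. If chandef::chan is
that 902500 kHz channel, pri_index = (902500 - 902500) / 1000 = 0, so the
sibling sits at 902500 + (0 ^ 1) * 1000 = 903500 kHz.
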
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 7e78e7d6f015..668aeee9b3f6 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -63,7 +63,7 @@ static inline u32 task_get_classid(const struct sk_buff *skb)
* calls by looking at the number of nested bh disable calls because
* softirqs always disables bh.
*/
- if (in_serving_softirq()) {
+ if (softirq_count()) {
struct sock *sk = skb_to_full_sk(skb);
/* If there is an sock_cgroup_classid we'll use that. */
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b32c9ceeb81d..9e824f61e40f 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -530,6 +530,8 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
+ DEVLINK_PARAM_GENERIC_ID_TOTAL_VFS,
+ DEVLINK_PARAM_GENERIC_ID_NUM_DOORBELLS,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -594,6 +596,12 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME "clock_id"
#define DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE DEVLINK_PARAM_TYPE_U64
+#define DEVLINK_PARAM_GENERIC_TOTAL_VFS_NAME "total_vfs"
+#define DEVLINK_PARAM_GENERIC_TOTAL_VFS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_NAME "num_doorbells"
+#define DEVLINK_PARAM_GENERIC_NUM_DOORBELLS_TYPE DEVLINK_PARAM_TYPE_U32
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -746,6 +754,10 @@ enum devlink_health_reporter_state {
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
* @test: callback to trigger a test event
+ * @default_graceful_period: default minimum time (in msec)
+ * between recovery attempts
+ * @default_burst_period: default time window (in msec) in which
+ * error recoveries may occur before the grace period starts
*/
struct devlink_health_reporter_ops {
@@ -760,6 +772,8 @@ struct devlink_health_reporter_ops {
struct netlink_ext_ack *extack);
int (*test)(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack);
+ u64 default_graceful_period;
+ u64 default_burst_period;
};
/**
@@ -1743,7 +1757,7 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
struct ib_device *ibdev);
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
- struct devlink_port_attrs *devlink_port_attrs);
+ const struct devlink_port_attrs *attrs);
void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
u16 pf, bool external);
void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
@@ -1928,22 +1942,22 @@ void devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
struct devlink_health_reporter *
devl_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
struct devlink_health_reporter *
devlink_port_health_reporter_create(struct devlink_port *port,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
struct devlink_health_reporter *
devl_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, void *priv);
+ void *priv);
void
devl_health_reporter_destroy(struct devlink_health_reporter *reporter);
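
Under the new API the grace parameters move from the create calls into the
ops; a hedged sketch (mydrv_* names illustrative, the recover handler
assumed defined elsewhere):

	static const struct devlink_health_reporter_ops mydrv_fw_ops = {
		.name = "fw",
		.recover = mydrv_fw_recover,
		.default_graceful_period = 1000,	/* msec */
		.default_burst_period = 500,		/* msec */
	};

	reporter = devl_health_reporter_create(devlink, &mydrv_fw_ops, priv);
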
diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h
index d8ff24a33459..58d91ccc56e0 100644
--- a/include/net/dropreason-core.h
+++ b/include/net/dropreason-core.h
@@ -127,6 +127,8 @@
FN(CANXL_RX_INVALID_FRAME) \
FN(PFMEMALLOC) \
FN(DUALPI2_STEP_DROP) \
+ FN(PSP_INPUT) \
+ FN(PSP_OUTPUT) \
FNe(MAX)
/**
@@ -610,6 +612,10 @@ enum skb_drop_reason {
* threshold of DualPI2 qdisc.
*/
SKB_DROP_REASON_DUALPI2_STEP_DROP,
+ /** @SKB_DROP_REASON_PSP_INPUT: PSP input checks failed */
+ SKB_DROP_REASON_PSP_INPUT,
+ /** @SKB_DROP_REASON_PSP_OUTPUT: PSP output checks failed */
+ SKB_DROP_REASON_PSP_OUTPUT,
/**
* @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
* shouldn't be used as a real 'reason' - only for tracing code gen
diff --git a/include/net/dst.h b/include/net/dst.h
index bab01363bb97..f8aa1239b4db 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -24,7 +24,10 @@
struct sk_buff;
struct dst_entry {
- struct net_device *dev;
+ union {
+ struct net_device *dev;
+ struct net_device __rcu *dev_rcu;
+ };
struct dst_ops *ops;
unsigned long _metrics;
unsigned long expires;
@@ -570,9 +573,12 @@ static inline struct net_device *dst_dev(const struct dst_entry *dst)
static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
{
- /* In the future, use rcu_dereference(dst->dev) */
- WARN_ON_ONCE(!rcu_read_lock_held());
- return READ_ONCE(dst->dev);
+ return rcu_dereference(dst->dev_rcu);
+}
+
+static inline struct net *dst_dev_net_rcu(const struct dst_entry *dst)
+{
+ return dev_net_rcu(dst_dev_rcu(dst));
}
static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
@@ -592,7 +598,7 @@ static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
{
- return dev_net_rcu(skb_dst_dev(skb));
+ return dev_net_rcu(skb_dst_dev_rcu(skb));
}
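
A hedged sketch of the access pattern these helpers now enforce (dst is
illustrative):

	struct net_device *dev;

	rcu_read_lock();
	dev = dst_dev_rcu(dst);	/* a real rcu_dereference() now */
	/* ... use dev, or dst_dev_net_rcu(dst) for its netns ... */
	rcu_read_unlock();
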
struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
diff --git a/include/net/flow.h b/include/net/flow.h
index a1839c278d87..ae9481c40063 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -12,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/uidgid.h>
+#include <net/inet_dscp.h>
struct flow_keys;
@@ -32,7 +33,7 @@ struct flowi_common {
int flowic_iif;
int flowic_l3mdev;
__u32 flowic_mark;
- __u8 flowic_tos;
+ dscp_t flowic_dscp;
__u8 flowic_scope;
__u8 flowic_proto;
__u8 flowic_flags;
@@ -70,7 +71,7 @@ struct flowi4 {
#define flowi4_iif __fl_common.flowic_iif
#define flowi4_l3mdev __fl_common.flowic_l3mdev
#define flowi4_mark __fl_common.flowic_mark
-#define flowi4_tos __fl_common.flowic_tos
+#define flowi4_dscp __fl_common.flowic_dscp
#define flowi4_scope __fl_common.flowic_scope
#define flowi4_proto __fl_common.flowic_proto
#define flowi4_flags __fl_common.flowic_flags
@@ -103,7 +104,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_l3mdev = 0;
fl4->flowi4_mark = mark;
- fl4->flowi4_tos = tos;
+ fl4->flowi4_dscp = inet_dsfield_to_dscp(tos);
fl4->flowi4_scope = scope;
fl4->flowi4_proto = proto;
fl4->flowi4_flags = flags;
@@ -141,7 +142,7 @@ struct flowi6 {
#define flowi6_uid __fl_common.flowic_uid
struct in6_addr daddr;
struct in6_addr saddr;
- /* Note: flowi6_tos is encoded in flowlabel, too. */
+ /* Note: flowi6_dscp is encoded in flowlabel, too. */
__be32 flowlabel;
union flowi_uli uli;
#define fl6_sport uli.ports.sport
@@ -163,7 +164,7 @@ struct flowi {
#define flowi_iif u.__fl_common.flowic_iif
#define flowi_l3mdev u.__fl_common.flowic_l3mdev
#define flowi_mark u.__fl_common.flowic_mark
-#define flowi_tos u.__fl_common.flowic_tos
+#define flowi_dscp u.__fl_common.flowic_dscp
#define flowi_scope u.__fl_common.flowic_scope
#define flowi_proto u.__fl_common.flowic_proto
#define flowi_flags u.__fl_common.flowic_flags
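
Code that used to read or write flowi4_tos as a raw __u8 now goes through
the dscp_t helpers from <net/inet_dscp.h>; a hedged conversion sketch (iph
and fl4 are illustrative):

	fl4->flowi4_dscp = inet_dsfield_to_dscp(iph->tos);	/* store */
	tos = inet_dscp_to_dsfield(fl4->flowi4_dscp);		/* read back */
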
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index a03d56765832..7b84f2cef8b1 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -62,7 +62,7 @@ struct genl_info;
* @small_ops: the small-struct operations supported by this family
* @n_small_ops: number of small-struct operations supported by this family
* @split_ops: the split do/dump form of operation definition
- * @n_split_ops: number of entries in @split_ops, not that with split do/dump
+ * @n_split_ops: number of entries in @split_ops, note that with split do/dump
* ops the number of entries is not the same as number of commands
* @sock_priv_size: the size of per-socket private memory
* @sock_priv_init: the per-socket private memory initializer
diff --git a/include/net/gro.h b/include/net/gro.h
index a0fca7ac6e7e..e3affb2e2ca8 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -71,14 +71,11 @@ struct napi_gro_cb {
/* Free the skb? */
u8 free:2;
- /* Used in foo-over-udp, set in udp[46]_gro_receive */
- u8 is_ipv6:1;
-
/* Used in GRE, set in fou/gue_gro_receive */
u8 is_fou:1;
/* Used to determine if ipid_offset can be ignored */
- u8 ip_fixedid:1;
+ u8 ip_fixedid:2;
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
@@ -445,29 +442,25 @@ static inline __wsum ip6_gro_compute_pseudo(const struct sk_buff *skb,
}
static inline int inet_gro_flush(const struct iphdr *iph, const struct iphdr *iph2,
- struct sk_buff *p, bool outer)
+ struct sk_buff *p, bool inner)
{
const u32 id = ntohl(*(__be32 *)&iph->id);
const u32 id2 = ntohl(*(__be32 *)&iph2->id);
const u16 ipid_offset = (id >> 16) - (id2 >> 16);
const u16 count = NAPI_GRO_CB(p)->count;
- const u32 df = id & IP_DF;
- int flush;
/* All fields must match except length and checksum. */
- flush = (iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | (df ^ (id2 & IP_DF));
-
- if (flush | (outer && df))
- return flush;
+ if ((iph->ttl ^ iph2->ttl) | (iph->tos ^ iph2->tos) | ((id ^ id2) & IP_DF))
+ return true;
/* When we receive our second frame we can decide whether we
* continue this flow as an atomic flow with a fixed ID or use
* an incrementing ID.
*/
- if (count == 1 && df && !ipid_offset)
- NAPI_GRO_CB(p)->ip_fixedid = true;
+ if (count == 1 && !ipid_offset)
+ NAPI_GRO_CB(p)->ip_fixedid |= 1 << inner;
- return ipid_offset ^ (count * !NAPI_GRO_CB(p)->ip_fixedid);
+ return ipid_offset ^ (count * !(NAPI_GRO_CB(p)->ip_fixedid & (1 << inner)));
}
static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr *iph2)
@@ -482,7 +475,7 @@ static inline int ipv6_gro_flush(const struct ipv6hdr *iph, const struct ipv6hdr
static inline int __gro_receive_network_flush(const void *th, const void *th2,
struct sk_buff *p, const u16 diff,
- bool outer)
+ bool inner)
{
const void *nh = th - diff;
const void *nh2 = th2 - diff;
@@ -490,19 +483,18 @@ static inline int __gro_receive_network_flush(const void *th, const void *th2,
if (((struct iphdr *)nh)->version == 6)
return ipv6_gro_flush(nh, nh2);
else
- return inet_gro_flush(nh, nh2, p, outer);
+ return inet_gro_flush(nh, nh2, p, inner);
}
static inline int gro_receive_network_flush(const void *th, const void *th2,
struct sk_buff *p)
{
- const bool encap_mark = NAPI_GRO_CB(p)->encap_mark;
int off = skb_transport_offset(p);
int flush;
- flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, encap_mark);
- if (encap_mark)
- flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, false);
+ flush = __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->network_offset, false);
+ if (NAPI_GRO_CB(p)->encap_mark)
+ flush |= __gro_receive_network_flush(th, th2, p, off - NAPI_GRO_CB(p)->inner_network_offset, true);
return flush;
}
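Here ip_fixedid grows from one bit to two: bit 0 tracks the outer (or only) IPv4 header, bit 1 the inner header of an encapsulated flow, which is why the helpers now take an "inner" flag and index the field with "1 << inner". An illustrative sketch of the bit addressing, not part of the patch:

    static bool gro_fixedid_set(const struct sk_buff *p, bool inner)
    {
        /* inner == false -> bit 0 (outer header),
         * inner == true  -> bit 1 (inner header)
         */
        return NAPI_GRO_CB(p)->ip_fixedid & (1 << inner);
    }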
diff --git a/include/net/hotdata.h b/include/net/hotdata.h
index fda94b2647ff..4acec191c54a 100644
--- a/include/net/hotdata.h
+++ b/include/net/hotdata.h
@@ -2,10 +2,16 @@
#ifndef _NET_HOTDATA_H
#define _NET_HOTDATA_H
+#include <linux/llist.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
+struct skb_defer_node {
+ struct llist_head defer_list;
+ atomic_long_t defer_count;
+} ____cacheline_aligned_in_smp;
+
/* Read mostly data used in network fast paths. */
struct net_hotdata {
#if IS_ENABLED(CONFIG_INET)
@@ -30,6 +36,7 @@ struct net_hotdata {
struct rps_sock_flow_table __rcu *rps_sock_flow_table;
u32 rps_cpu_mask;
#endif
+ struct skb_defer_node __percpu *skb_defer_nodes;
int gro_normal_batch;
int netdev_budget;
int netdev_budget_usecs;
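A rough sketch of the producer side for the new per-cpu skb_defer_node; the helper name is hypothetical, but sk_buff already carries an llist_node (ll_node) suitable for such deferred-free lists:

    static void skb_defer_enqueue(struct skb_defer_node *node,
                                  struct sk_buff *skb)
    {
        /* lockless push onto this CPU/node's defer list */
        llist_add(&skb->ll_node, &node->defer_list);
        atomic_long_inc(&node->defer_count);
    }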
diff --git a/include/net/icmp.h b/include/net/icmp.h
index caddf4a59ad1..935ee13d9ae9 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -37,10 +37,10 @@ struct sk_buff;
struct net;
void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
- const struct ip_options *opt);
+ const struct inet_skb_parm *parm);
static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
- __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+ __icmp_send(skb_in, type, code, info, IPCB(skb_in));
}
#if IS_ENABLED(CONFIG_NF_NAT)
@@ -48,8 +48,10 @@ void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
#else
static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
- struct ip_options opts = { 0 };
- __icmp_send(skb_in, type, code, info, &opts);
+ struct inet_skb_parm parm;
+
+ memset(&parm, 0, sizeof(parm));
+ __icmp_send(skb_in, type, code, info, &parm);
}
#endif
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index ab3929a2a956..282e29237d93 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -41,7 +41,6 @@ static inline unsigned int __inet6_ehashfn(const u32 lhash,
* The sockhash lock must be held as a reader here.
*/
struct sock *__inet6_lookup_established(const struct net *net,
- struct inet_hashinfo *hashinfo,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
@@ -65,7 +64,6 @@ struct sock *inet6_lookup_reuseport(const struct net *net, struct sock *sk,
inet6_ehashfn_t *ehashfn);
struct sock *inet6_lookup_listener(const struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
const __be16 sport,
@@ -83,7 +81,6 @@ struct sock *inet6_lookup_run_sk_lookup(const struct net *net,
inet6_ehashfn_t *ehashfn);
static inline struct sock *__inet6_lookup(const struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const struct in6_addr *saddr,
const __be16 sport,
@@ -92,14 +89,14 @@ static inline struct sock *__inet6_lookup(const struct net *net,
const int dif, const int sdif,
bool *refcounted)
{
- struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
- sport, daddr, hnum,
+ struct sock *sk = __inet6_lookup_established(net, saddr, sport,
+ daddr, hnum,
dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
- return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
+ return inet6_lookup_listener(net, skb, doff, saddr, sport,
daddr, hnum, dif, sdif);
}
@@ -143,8 +140,7 @@ struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff,
return reuse_sk;
}
-static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
+static inline struct sock *__inet6_lookup_skb(struct sk_buff *skb, int doff,
const __be16 sport,
const __be16 dport,
int iif, int sdif,
@@ -161,20 +157,16 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
if (sk)
return sk;
- return __inet6_lookup(net, hashinfo, skb,
- doff, &ip6h->saddr, sport,
+ return __inet6_lookup(net, skb, doff, &ip6h->saddr, sport,
&ip6h->daddr, ntohs(dport),
iif, sdif, refcounted);
}
-struct sock *inet6_lookup(const struct net *net, struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
+struct sock *inet6_lookup(const struct net *net, struct sk_buff *skb, int doff,
const struct in6_addr *saddr, const __be16 sport,
const struct in6_addr *daddr, const __be16 dport,
const int dif);
-int inet6_hash(struct sock *sk);
-
static inline bool inet6_match(const struct net *net, const struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 1735db332aab..b4b886647607 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -299,14 +299,8 @@ reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
return (unsigned long)min_t(u64, timeout, max_timeout);
}
-static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
-{
- /* The below has to be done to allow calling inet_csk_destroy_sock */
- sock_set_flag(sk, SOCK_DEAD);
- this_cpu_inc(*sk->sk_prot->orphan_count);
-}
-
void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_for_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
/*
@@ -322,8 +316,9 @@ int inet_csk_listen_start(struct sock *sk);
void inet_csk_listen_stop(struct sock *sk);
/* update the fast reuse flag when adding a socket */
-void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
- struct sock *sk);
+void inet_csk_update_fastreuse(const struct sock *sk,
+ struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2);
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
diff --git a/include/net/inet_dscp.h b/include/net/inet_dscp.h
index 72f250dffada..1aa9f04ed1ab 100644
--- a/include/net/inet_dscp.h
+++ b/include/net/inet_dscp.h
@@ -39,6 +39,12 @@ typedef u8 __bitwise dscp_t;
#define INET_DSCP_MASK 0xfc
+/* A few places in the IPv4 code need to ignore the three high order bits of
+ * DSCP because of backward compatibility (as these bits used to represent the
+ * IPv4 Precedence in RFC 791's TOS field and were ignored).
+ */
+#define INET_DSCP_LEGACY_TOS_MASK ((__force dscp_t)0x1c)
+
static inline dscp_t inet_dsfield_to_dscp(__u8 dsfield)
{
return (__force dscp_t)(dsfield & INET_DSCP_MASK);
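A worked example of the legacy mask, matching how the fib_dscp_masked_match() hunk below uses it: two dsfield values that differ only in the old precedence bits compare equal once masked.

    /* dsfield 0xb8 (DSCP EF, precedence 101) and dsfield 0x38
     * (same low DSCP bits, precedence 001):
     *
     *   0xb8 & 0x1c == 0x18
     *   0x38 & 0x1c == 0x18
     */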
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 19dbd9081d5a..ac05a52d9e13 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -108,6 +108,8 @@ struct inet_bind2_bucket {
struct hlist_node bhash_node;
/* List of sockets hashed to this bucket */
struct hlist_head owners;
+ signed char fastreuse;
+ signed char fastreuseport;
};
static inline struct net *ib_net(const struct inet_bind_bucket *ib)
@@ -289,12 +291,10 @@ int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
bool *found_dup_sk);
-int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
struct sock *__inet_lookup_listener(const struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr,
@@ -302,12 +302,12 @@ struct sock *__inet_lookup_listener(const struct net *net,
const int dif, const int sdif);
static inline struct sock *inet_lookup_listener(struct net *net,
- struct inet_hashinfo *hashinfo,
- struct sk_buff *skb, int doff,
- __be32 saddr, __be16 sport,
- __be32 daddr, __be16 dport, int dif, int sdif)
+ struct sk_buff *skb, int doff,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, __be16 dport,
+ int dif, int sdif)
{
- return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
+ return __inet_lookup_listener(net, skb, doff, saddr, sport,
daddr, ntohs(dport), dif, sdif);
}
@@ -358,7 +358,6 @@ static inline bool inet_match(const struct net *net, const struct sock *sk,
* not check it for lookups anymore, thanks Alexey. -DaveM
*/
struct sock *__inet_lookup_established(const struct net *net,
- struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 hnum,
const int dif, const int sdif);
@@ -384,18 +383,16 @@ struct sock *inet_lookup_run_sk_lookup(const struct net *net,
__be32 daddr, u16 hnum, const int dif,
inet_ehashfn_t *ehashfn);
-static inline struct sock *
- inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
- const __be32 saddr, const __be16 sport,
- const __be32 daddr, const __be16 dport,
- const int dif)
+static inline struct sock *inet_lookup_established(struct net *net,
+ const __be32 saddr, const __be16 sport,
+ const __be32 daddr, const __be16 dport,
+ const int dif)
{
- return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
+ return __inet_lookup_established(net, saddr, sport, daddr,
ntohs(dport), dif, 0);
}
static inline struct sock *__inet_lookup(struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
@@ -405,18 +402,17 @@ static inline struct sock *__inet_lookup(struct net *net,
u16 hnum = ntohs(dport);
struct sock *sk;
- sk = __inet_lookup_established(net, hashinfo, saddr, sport,
+ sk = __inet_lookup_established(net, saddr, sport,
daddr, hnum, dif, sdif);
*refcounted = true;
if (sk)
return sk;
*refcounted = false;
- return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
+ return __inet_lookup_listener(net, skb, doff, saddr,
sport, daddr, hnum, dif, sdif);
}
static inline struct sock *inet_lookup(struct net *net,
- struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const __be16 dport,
@@ -425,7 +421,7 @@ static inline struct sock *inet_lookup(struct net *net,
struct sock *sk;
bool refcounted;
- sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
+ sk = __inet_lookup(net, skb, doff, saddr, sport, daddr,
dport, dif, 0, &refcounted);
if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
@@ -473,8 +469,7 @@ struct sock *inet_steal_sock(struct net *net, struct sk_buff *skb, int doff,
return reuse_sk;
}
-static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
- struct sk_buff *skb,
+static inline struct sock *__inet_lookup_skb(struct sk_buff *skb,
int doff,
const __be16 sport,
const __be16 dport,
@@ -492,8 +487,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
if (sk)
return sk;
- return __inet_lookup(net, hashinfo, skb,
- doff, iph->saddr, sport,
+ return __inet_lookup(net, skb, doff, iph->saddr, sport,
iph->daddr, dport, inet_iif(skb), sdif,
refcounted);
}
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 67a313575780..63a644ff30de 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -70,7 +70,8 @@ struct inet_timewait_sock {
unsigned int tw_transparent : 1,
tw_flowlabel : 20,
tw_usec_ts : 1,
- tw_pad : 2, /* 2 bits hole */
+ tw_connect_bind : 1,
+ tw_pad : 1, /* 1 bit hole */
tw_tos : 8;
u32 tw_txhash;
u32 tw_priority;
@@ -81,6 +82,14 @@ struct inet_timewait_sock {
struct timer_list tw_timer;
struct inet_bind_bucket *tw_tb;
struct inet_bind2_bucket *tw_tb2;
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_assoc __rcu *psp_assoc;
+#endif
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ struct sk_buff* (*tw_validate_xmit_skb)(struct sock *sk,
+ struct net_device *dev,
+ struct sk_buff *skb);
+#endif
};
#define tw_tclass tw_tos
diff --git a/include/net/ip.h b/include/net/ip.h
index befcba575129..380afb691c41 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -326,11 +326,12 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
}
#endif
-#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+#define snmp_get_cpu_field64_batch_cnt(buff64, stats_list, cnt, \
+ mib_statistic, offset) \
{ \
int i, c; \
for_each_possible_cpu(c) { \
- for (i = 0; stats_list[i].name; i++) \
+ for (i = 0; i < cnt; i++) \
buff64[i] += snmp_get_cpu_field64( \
mib_statistic, \
c, stats_list[i].entry, \
@@ -338,11 +339,11 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
} \
}
-#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+#define snmp_get_cpu_field_batch_cnt(buff, stats_list, cnt, mib_statistic) \
{ \
int i, c; \
for_each_possible_cpu(c) { \
- for (i = 0; stats_list[i].name; i++) \
+ for (i = 0; i < cnt; i++) \
buff[i] += snmp_get_cpu_field( \
mib_statistic, \
c, stats_list[i].entry); \
@@ -467,12 +468,14 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
const struct rtable *rt = dst_rtable(dst);
+ const struct net_device *dev;
unsigned int mtu, res;
struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst));
+ dev = dst_dev_rcu(dst);
+ net = dev_net_rcu(dev);
if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
ip_mtu_locked(dst) ||
!forwarding) {
@@ -486,7 +489,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
if (mtu)
goto out;
- mtu = READ_ONCE(dst_dev(dst)->mtu);
+ mtu = READ_ONCE(dev->mtu);
if (unlikely(ip_mtu_locked(dst))) {
if (rt->rt_uses_gateway && mtu > 576)
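The *_batch macros now take an explicit element count instead of scanning for a NULL-name sentinel; the SNMP_MIB_SENTINEL removal in the snmp.h hunk further down is the other half of this change. A hedged sketch of a converted call site, with the array contents illustrative:

    static const struct snmp_mib mib_list[] = {
        SNMP_MIB_ITEM("InReceives", IPSTATS_MIB_INPKTS),
        /* ... no SNMP_MIB_SENTINEL terminator any more */
    };
    u64 buff64[ARRAY_SIZE(mib_list)] = {};

    snmp_get_cpu_field64_batch_cnt(buff64, mib_list, ARRAY_SIZE(mib_list),
                                   net->mib.ip_statistics,
                                   offsetof(struct ipstats_mib, syncp));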
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 9255f21818ee..7c5512baa4b2 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -229,16 +229,16 @@ static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
* Store a destination cache entry in a socket
*/
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
- const struct in6_addr *daddr,
- const struct in6_addr *saddr)
+ bool daddr_set,
+ bool saddr_set)
{
struct ipv6_pinfo *np = inet6_sk(sk);
np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
sk_setup_caps(sk, dst);
- np->daddr_cache = daddr;
+ np->daddr_cache = daddr_set;
#ifdef CONFIG_IPV6_SUBTREES
- np->saddr_cache = saddr;
+ np->saddr_cache = saddr_set;
#endif
}
@@ -337,7 +337,7 @@ static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst
mtu = IPV6_MIN_MTU;
rcu_read_lock();
- idev = __in6_dev_get(dst_dev(dst));
+ idev = __in6_dev_get(dst_dev_rcu(dst));
if (idev)
mtu = READ_ONCE(idev->cnf.mtu6);
rcu_read_unlock();
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 48bb3cf41469..b4495c38e0a0 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -440,7 +440,7 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
static inline bool fib_dscp_masked_match(dscp_t dscp, const struct flowi4 *fl4)
{
- return dscp == inet_dsfield_to_dscp(RT_TOS(fl4->flowi4_tos));
+ return dscp == (fl4->flowi4_dscp & INET_DSCP_LEGACY_TOS_MASK);
}
/* Exported by fib_frontend.c */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 8cf1380f3656..4314a97702ea 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -11,7 +11,9 @@
#include <linux/bitops.h>
#include <net/dsfield.h>
+#include <net/flow.h>
#include <net/gro_cells.h>
+#include <net/inet_dscp.h>
#include <net/inet_ecn.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
@@ -362,7 +364,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->daddr = daddr;
fl4->saddr = saddr;
- fl4->flowi4_tos = tos;
+ fl4->flowi4_dscp = inet_dsfield_to_dscp(tos);
fl4->flowi4_proto = proto;
fl4->fl4_gre_key = key;
fl4->flowi4_mark = mark;
diff --git a/include/net/libeth/xdp.h b/include/net/libeth/xdp.h
index f4880b50e804..bc3507edd589 100644
--- a/include/net/libeth/xdp.h
+++ b/include/net/libeth/xdp.h
@@ -1274,7 +1274,6 @@ bool libeth_xdp_buff_add_frag(struct libeth_xdp_buff *xdp,
* Internal, use libeth_xdp_process_buff() instead. Initializes XDP buffer
* head with the Rx buffer data: data pointer, length, headroom, and
* truesize/tailroom. Zeroes the flags.
- * Uses faster single u64 write instead of per-field access.
*/
static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
const struct libeth_fqe *fqe,
@@ -1282,17 +1281,9 @@ static inline void libeth_xdp_prepare_buff(struct libeth_xdp_buff *xdp,
{
const struct page *page = __netmem_to_page(fqe->netmem);
-#ifdef __LIBETH_WORD_ACCESS
- static_assert(offsetofend(typeof(xdp->base), flags) -
- offsetof(typeof(xdp->base), frame_sz) ==
- sizeof(u64));
-
- *(u64 *)&xdp->base.frame_sz = fqe->truesize;
-#else
- xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
-#endif
xdp_prepare_buff(&xdp->base, page_address(page) + fqe->offset,
pp_page_to_nmdesc(page)->pp->p.offset, len, true);
+ xdp_init_buff(&xdp->base, fqe->truesize, xdp->base.rxq);
}
/**
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a45e4bee65d4..a55085cf4ec4 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3192,6 +3192,10 @@ ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
{
if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
return NULL;
+
+ if (c->band >= NUM_NL80211_BANDS)
+ return NULL;
+
return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
}
@@ -7834,4 +7838,10 @@ int ieee80211_emulate_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode);
+/**
+ * ieee80211_vif_nan_started - Return whether a NAN vif is started
+ * @vif: the vif
+ * Return: %true iff the vif is a NAN interface and NAN is started
+ */
+bool ieee80211_vif_nan_started(struct ieee80211_vif *vif);
#endif /* MAC80211_H */
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index e1030a7d2daa..0921485565c0 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -65,6 +65,8 @@ enum TRI_STATE {
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11
+#define MANA_RX_FRAG_ALIGNMENT 64
+
struct mana_stats_rx {
u64 packets;
u64 bytes;
@@ -328,6 +330,7 @@ struct mana_rxq {
u32 datasize;
u32 alloc_size;
u32 headroom;
+ u32 frag_count;
mana_handle_t rxobj;
@@ -510,6 +513,7 @@ struct mana_port_context {
u32 rxbpre_datasize;
u32 rxbpre_alloc_size;
u32 rxbpre_headroom;
+ u32 rxbpre_frag_count;
struct bpf_prog *bpf_prog;
diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
index 6e835972abd1..cd00e0406cf4 100644
--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -127,6 +127,9 @@ void netdev_stat_queue_sum(struct net_device *netdev,
* @ndo_queue_stop: Stop the RX queue at the specified index. The stopped
* queue's memory is written at the specified address.
*
+ * @ndo_queue_get_dma_dev: Get the DMA device used for zero-copy operations
+ * on this queue. Return NULL on error.
+ *
* Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while
* the interface is closed. @ndo_queue_start and @ndo_queue_stop will only
* be called for an interface which is open.
@@ -144,8 +147,12 @@ struct netdev_queue_mgmt_ops {
int (*ndo_queue_stop)(struct net_device *dev,
void *per_queue_mem,
int idx);
+ struct device * (*ndo_queue_get_dma_dev)(struct net_device *dev,
+ int idx);
};
+bool netif_rxq_has_unreadable_mp(struct net_device *dev, int idx);
+
/**
* DOC: Lockless queue stopping / waking helpers.
*
@@ -321,4 +328,6 @@ static inline void netif_subqueue_sent(const struct net_device *dev,
get_desc, start_thrs); \
})
+struct device *netdev_queue_get_dma_dev(struct net_device *dev, int idx);
+
#endif
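A hedged sketch of a driver implementing the new queue op; the foo_* names are hypothetical. The contract is simply "return the device that DMA-maps this queue's buffers, or NULL on error":

    static struct device *foo_queue_get_dma_dev(struct net_device *dev,
                                                int idx)
    {
        struct foo_priv *priv = netdev_priv(dev);  /* hypothetical priv */

        return priv->pdev ? &priv->pdev->dev : NULL;
    }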
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index c653fcb88354..09de2f2686b5 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -10,14 +10,6 @@
void nf_send_unreach(struct sk_buff *skb_in, int code, int hook);
void nf_send_reset(struct net *net, struct sock *, struct sk_buff *oldskb,
int hook);
-const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
- struct tcphdr *_oth, int hook);
-struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- __u8 protocol, int ttl);
-void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
- const struct tcphdr *oth);
-
struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index d729344ba644..94ec0b9f2838 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -9,16 +9,6 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char cod
unsigned int hooknum);
void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
int hook);
-const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
- struct tcphdr *otcph,
- unsigned int *otcplen, int hook);
-struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- __u8 protocol, int hoplimit);
-void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
- const struct sk_buff *oldskb,
- const struct tcphdr *oth, unsigned int otcplen);
-
struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
struct sk_buff *oldskb,
const struct net_device *dev,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3faa80f5d801..fab7dc73f738 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -556,6 +556,7 @@ struct nft_set_elem_expr {
* @size: maximum set size
* @field_len: length of each field in concatenation, bytes
* @field_count: number of concatenated fields in element
+ * @in_update_walk: true during ->walk() in transaction phase
* @use: number of rules references to this set
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
@@ -590,6 +591,7 @@ struct nft_set {
u32 size;
u8 field_len[NFT_REG32_COUNT];
u8 field_count;
+ bool in_update_walk;
u32 use;
atomic_t nelems;
u32 ndeact;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 656e784714f3..b8df5acbb723 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -73,7 +73,7 @@ struct nft_ct {
struct nft_payload {
enum nft_payload_bases base:8;
- u8 offset;
+ u16 offset;
u8 len;
u8 dreg;
};
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 6373e3f17da8..34eb3aecb3f2 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -148,6 +148,8 @@ struct netns_ipv4 {
struct local_ports ip_local_ports;
u8 sysctl_tcp_ecn;
+ u8 sysctl_tcp_ecn_option;
+ u8 sysctl_tcp_ecn_option_beacon;
u8 sysctl_tcp_ecn_fallback;
u8 sysctl_ip_default_ttl;
@@ -251,6 +253,7 @@ struct netns_ipv4 {
int sysctl_igmp_qrv;
struct ping_group_range ping_group_range;
+ u16 ping_port_rover;
atomic_t dev_addr_genid;
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index d25cd7a9c5ff..c0f97f36389e 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -75,8 +75,8 @@ struct netns_sctp {
/* Whether Cookie Preservative is enabled(1) or not(0) */
int cookie_preserve_enable;
- /* The namespace default hmac alg */
- char *sctp_hmac_alg;
+ /* Whether cookie authentication is enabled(1) or not(0) */
+ int cookie_auth_enable;
/* Valid.Cookie.Life - 60 seconds */
unsigned int valid_cookie_life;
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index e180bdf2f82b..664d5058e66e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -52,7 +52,7 @@ enum nci_state {
#define NCI_RF_DISC_SELECT_TIMEOUT 5000
#define NCI_RF_DEACTIVATE_TIMEOUT 30000
#define NCI_CMD_TIMEOUT 5000
-#define NCI_DATA_TIMEOUT 700
+#define NCI_DATA_TIMEOUT 3000
struct nci_dev;
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index db180626be06..3247026e096a 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -489,6 +489,11 @@ page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool,
offset, dma_sync_size);
}
+static inline void page_pool_get(struct page_pool *pool)
+{
+ refcount_inc(&pool->user_cnt);
+}
+
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
@@ -500,6 +505,18 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
page_pool_update_nid(pool, new_nid);
}
+/**
+ * page_pool_is_unreadable() - will allocated buffers be unreadable for the CPU
+ * @pool: queried page pool
+ *
+ * Check if page pool will return buffers which are unreadable to the CPU /
+ * kernel. This will only be the case if user space bound a memory provider (mp)
+ * which returns unreadable memory to the queue served by the page pool.
+ * If %PP_FLAG_ALLOW_UNREADABLE_NETMEM was set but there is no mp bound,
+ * this helper will return false. See also netif_rxq_has_unreadable_mp().
+ *
+ * Return: true if memory allocated by the page pool may be unreadable
+ */
static inline bool page_pool_is_unreadable(struct page_pool *pool)
{
return !!pool->mp_ops;
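Illustrative use of the helper, assuming netmem_address() as the usual accessor: consumers must not dereference buffer memory from pools that may hand out unreadable netmem.

    if (!page_pool_is_unreadable(pool))
        va = netmem_address(netmem);   /* safe: CPU-readable memory */
    else
        va = NULL;                     /* e.g. device memory, skip CPU access */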
diff --git a/include/net/ping.h b/include/net/ping.h
index bc7779262e60..9634b8800814 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -54,7 +54,6 @@ struct pingfakehdr {
};
int ping_get_port(struct sock *sk, unsigned short ident);
-int ping_hash(struct sock *sk);
void ping_unhash(struct sock *sk);
int ping_init_sock(struct sock *sk);
diff --git a/include/net/proto_memory.h b/include/net/proto_memory.h
index a6ab2f4f5e28..8e91a8fa31b5 100644
--- a/include/net/proto_memory.h
+++ b/include/net/proto_memory.h
@@ -31,8 +31,8 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
if (!sk->sk_prot->memory_pressure)
return false;
- if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ if (mem_cgroup_sk_enabled(sk) &&
+ mem_cgroup_sk_under_memory_pressure(sk))
return true;
return !!READ_ONCE(*sk->sk_prot->memory_pressure);
diff --git a/include/net/psp.h b/include/net/psp.h
new file mode 100644
index 000000000000..33bb4d1dc46e
--- /dev/null
+++ b/include/net/psp.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __NET_PSP_ALL_H
+#define __NET_PSP_ALL_H
+
+#include <uapi/linux/psp.h>
+#include <net/psp/functions.h>
+#include <net/psp/types.h>
+
+/* Do not add any code here. Put it in the sub-headers instead. */
+
+#endif /* __NET_PSP_ALL_H */
diff --git a/include/net/psp/functions.h b/include/net/psp/functions.h
new file mode 100644
index 000000000000..ef7743664da3
--- /dev/null
+++ b/include/net/psp/functions.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __NET_PSP_HELPERS_H
+#define __NET_PSP_HELPERS_H
+
+#include <linux/skbuff.h>
+#include <linux/rcupdate.h>
+#include <linux/udp.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/psp/types.h>
+
+struct inet_timewait_sock;
+
+/* Driver-facing API */
+struct psp_dev *
+psp_dev_create(struct net_device *netdev, struct psp_dev_ops *psd_ops,
+ struct psp_dev_caps *psd_caps, void *priv_ptr);
+void psp_dev_unregister(struct psp_dev *psd);
+bool psp_dev_encapsulate(struct net *net, struct sk_buff *skb, __be32 spi,
+ u8 ver, __be16 sport);
+int psp_dev_rcv(struct sk_buff *skb, u16 dev_id, u8 generation, bool strip_icv);
+
+/* Kernel-facing API */
+void psp_assoc_put(struct psp_assoc *pas);
+
+static inline void *psp_assoc_drv_data(struct psp_assoc *pas)
+{
+ return pas->drv_data;
+}
+
+#if IS_ENABLED(CONFIG_INET_PSP)
+unsigned int psp_key_size(u32 version);
+void psp_sk_assoc_free(struct sock *sk);
+void psp_twsk_init(struct inet_timewait_sock *tw, const struct sock *sk);
+void psp_twsk_assoc_free(struct inet_timewait_sock *tw);
+void psp_reply_set_decrypted(struct sk_buff *skb);
+
+static inline struct psp_assoc *psp_sk_assoc(const struct sock *sk)
+{
+ return rcu_dereference_check(sk->psp_assoc, lockdep_sock_is_held(sk));
+}
+
+static inline void
+psp_enqueue_set_decrypted(struct sock *sk, struct sk_buff *skb)
+{
+ struct psp_assoc *pas;
+
+ pas = psp_sk_assoc(sk);
+ if (pas && pas->tx.spi)
+ skb->decrypted = 1;
+}
+
+static inline unsigned long
+__psp_skb_coalesce_diff(const struct sk_buff *one, const struct sk_buff *two,
+ unsigned long diffs)
+{
+ struct psp_skb_ext *a, *b;
+
+ a = skb_ext_find(one, SKB_EXT_PSP);
+ b = skb_ext_find(two, SKB_EXT_PSP);
+
+ diffs |= (!!a) ^ (!!b);
+ if (!diffs && unlikely(a))
+ diffs |= memcmp(a, b, sizeof(*a));
+ return diffs;
+}
+
+static inline bool
+psp_is_allowed_nondata(struct sk_buff *skb, struct psp_assoc *pas)
+{
+ bool fin = !!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN);
+ u32 end_seq = TCP_SKB_CB(skb)->end_seq;
+ u32 seq = TCP_SKB_CB(skb)->seq;
+ bool pure_fin;
+
+ pure_fin = fin && end_seq - seq == 1;
+
+ return seq == end_seq || (pure_fin && seq == pas->upgrade_seq);
+}
+
+static inline bool
+psp_pse_matches_pas(struct psp_skb_ext *pse, struct psp_assoc *pas)
+{
+ return pse && pas->rx.spi == pse->spi &&
+ pas->generation == pse->generation &&
+ pas->version == pse->version &&
+ pas->dev_id == pse->dev_id;
+}
+
+static inline enum skb_drop_reason
+__psp_sk_rx_policy_check(struct sk_buff *skb, struct psp_assoc *pas)
+{
+ struct psp_skb_ext *pse = skb_ext_find(skb, SKB_EXT_PSP);
+
+ if (!pas)
+ return pse ? SKB_DROP_REASON_PSP_INPUT : 0;
+
+ if (likely(psp_pse_matches_pas(pse, pas))) {
+ if (unlikely(!pas->peer_tx))
+ pas->peer_tx = 1;
+
+ return 0;
+ }
+
+ if (!pse) {
+ if (!pas->tx.spi ||
+ (!pas->peer_tx && psp_is_allowed_nondata(skb, pas)))
+ return 0;
+ }
+
+ return SKB_DROP_REASON_PSP_INPUT;
+}
+
+static inline enum skb_drop_reason
+psp_sk_rx_policy_check(struct sock *sk, struct sk_buff *skb)
+{
+ return __psp_sk_rx_policy_check(skb, psp_sk_assoc(sk));
+}
+
+static inline enum skb_drop_reason
+psp_twsk_rx_policy_check(struct inet_timewait_sock *tw, struct sk_buff *skb)
+{
+ return __psp_sk_rx_policy_check(skb, rcu_dereference(tw->psp_assoc));
+}
+
+static inline struct psp_assoc *psp_sk_get_assoc_rcu(const struct sock *sk)
+{
+ struct psp_assoc *pas;
+ int state;
+
+ state = READ_ONCE(sk->sk_state);
+ if (!sk_is_inet(sk) || state == TCP_NEW_SYN_RECV)
+ return NULL;
+
+ pas = state == TCP_TIME_WAIT ?
+ rcu_dereference(inet_twsk(sk)->psp_assoc) :
+ rcu_dereference(sk->psp_assoc);
+ return pas;
+}
+
+static inline struct psp_assoc *psp_skb_get_assoc_rcu(struct sk_buff *skb)
+{
+ if (!skb->decrypted || !skb->sk)
+ return NULL;
+
+ return psp_sk_get_assoc_rcu(skb->sk);
+}
+
+static inline unsigned int psp_sk_overhead(const struct sock *sk)
+{
+ int psp_encap = sizeof(struct udphdr) + PSP_HDR_SIZE + PSP_TRL_SIZE;
+ bool has_psp = rcu_access_pointer(sk->psp_assoc);
+
+ return has_psp ? psp_encap : 0;
+}
+#else
+static inline void psp_sk_assoc_free(struct sock *sk) { }
+static inline void
+psp_twsk_init(struct inet_timewait_sock *tw, const struct sock *sk) { }
+static inline void psp_twsk_assoc_free(struct inet_timewait_sock *tw) { }
+static inline void
+psp_reply_set_decrypted(struct sk_buff *skb) { }
+
+static inline struct psp_assoc *psp_sk_assoc(const struct sock *sk)
+{
+ return NULL;
+}
+
+static inline void
+psp_enqueue_set_decrypted(struct sock *sk, struct sk_buff *skb) { }
+
+static inline unsigned long
+__psp_skb_coalesce_diff(const struct sk_buff *one, const struct sk_buff *two,
+ unsigned long diffs)
+{
+ return diffs;
+}
+
+static inline enum skb_drop_reason
+psp_sk_rx_policy_check(struct sock *sk, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline enum skb_drop_reason
+psp_twsk_rx_policy_check(struct inet_timewait_sock *tw, struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline struct psp_assoc *psp_skb_get_assoc_rcu(struct sk_buff *skb)
+{
+ return NULL;
+}
+
+static inline unsigned int psp_sk_overhead(const struct sock *sk)
+{
+ return 0;
+}
+#endif
+
+static inline unsigned long
+psp_skb_coalesce_diff(const struct sk_buff *one, const struct sk_buff *two)
+{
+ return __psp_skb_coalesce_diff(one, two, 0);
+}
+
+#endif /* __NET_PSP_HELPERS_H */
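A sketch of how a receive path is expected to consume the policy check; the call site is assumed, not part of this header. A zero return means the packet passes, any other value is the drop reason to report:

    enum skb_drop_reason reason;

    reason = psp_sk_rx_policy_check(sk, skb);
    if (unlikely(reason)) {
        kfree_skb_reason(skb, reason);
        return 0;
    }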
diff --git a/include/net/psp/types.h b/include/net/psp/types.h
new file mode 100644
index 000000000000..31cee64b7c86
--- /dev/null
+++ b/include/net/psp/types.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __NET_PSP_H
+#define __NET_PSP_H
+
+#include <linux/mutex.h>
+#include <linux/refcount.h>
+
+struct netlink_ext_ack;
+
+#define PSP_DEFAULT_UDP_PORT 1000
+
+struct psphdr {
+ u8 nexthdr;
+ u8 hdrlen;
+ u8 crypt_offset;
+ u8 verfl;
+ __be32 spi;
+ __be64 iv;
+ __be64 vc[]; /* optional */
+};
+
+#define PSP_ENCAP_HLEN (sizeof(struct udphdr) + sizeof(struct psphdr))
+
+#define PSP_SPI_KEY_ID GENMASK(30, 0)
+#define PSP_SPI_KEY_PHASE BIT(31)
+
+#define PSPHDR_CRYPT_OFFSET GENMASK(5, 0)
+
+#define PSPHDR_VERFL_SAMPLE BIT(7)
+#define PSPHDR_VERFL_DROP BIT(6)
+#define PSPHDR_VERFL_VERSION GENMASK(5, 2)
+#define PSPHDR_VERFL_VIRT BIT(1)
+#define PSPHDR_VERFL_ONE BIT(0)
+
+#define PSP_HDRLEN_NOOPT ((sizeof(struct psphdr) - 8) / 8)
+
+/**
+ * struct psp_dev_config - PSP device configuration
+ * @versions: PSP versions enabled on the device
+ */
+struct psp_dev_config {
+ u32 versions;
+};
+
+/**
+ * struct psp_dev - PSP device struct
+ * @main_netdev: original netdevice of this PSP device
+ * @ops: driver callbacks
+ * @caps: device capabilities
+ * @drv_priv: driver priv pointer
+ * @lock: instance lock, protects all fields
+ * @refcnt: reference count for the instance
+ * @id: instance id
+ * @generation: current generation of the device key
+ * @config: current device configuration
+ * @active_assocs: list of registered associations
+ * @prev_assocs: associations which use old (but still usable)
+ * device key
+ * @stale_assocs: associations which use a rotated out key
+ *
+ * @rcu: RCU head for freeing the structure
+ */
+struct psp_dev {
+ struct net_device *main_netdev;
+
+ struct psp_dev_ops *ops;
+ struct psp_dev_caps *caps;
+ void *drv_priv;
+
+ struct mutex lock;
+ refcount_t refcnt;
+
+ u32 id;
+
+ u8 generation;
+
+ struct psp_dev_config config;
+
+ struct list_head active_assocs;
+ struct list_head prev_assocs;
+ struct list_head stale_assocs;
+
+ struct rcu_head rcu;
+};
+
+#define PSP_GEN_VALID_MASK 0x7f
+
+/**
+ * struct psp_dev_caps - PSP device capabilities
+ */
+struct psp_dev_caps {
+ /**
+ * @versions: mask of supported PSP versions
+ * Set this field to 0 to indicate PSP is not supported at all.
+ */
+ u32 versions;
+
+ /**
+ * @assoc_drv_spc: size of driver-specific state in Tx assoc
+ * Determines the size of struct psp_assoc::drv_data
+ */
+ u32 assoc_drv_spc;
+};
+
+#define PSP_MAX_KEY 32
+
+#define PSP_HDR_SIZE 16 /* We don't support optional fields, yet */
+#define PSP_TRL_SIZE 16 /* AES-GCM/GMAC trailer size */
+
+struct psp_skb_ext {
+ __be32 spi;
+ u16 dev_id;
+ u8 generation;
+ u8 version;
+};
+
+struct psp_key_parsed {
+ __be32 spi;
+ u8 key[PSP_MAX_KEY];
+};
+
+struct psp_assoc {
+ struct psp_dev *psd;
+
+ u16 dev_id;
+ u8 generation;
+ u8 version;
+ u8 peer_tx;
+
+ u32 upgrade_seq;
+
+ struct psp_key_parsed tx;
+ struct psp_key_parsed rx;
+
+ refcount_t refcnt;
+ struct rcu_head rcu;
+ struct work_struct work;
+ struct list_head assocs_list;
+
+ u8 drv_data[] __aligned(8);
+};
+
+/**
+ * struct psp_dev_ops - netdev driver facing PSP callbacks
+ */
+struct psp_dev_ops {
+ /**
+ * @set_config: set configuration of a PSP device
+ * Driver can inspect @psd->config for the previous configuration.
+ * Core will update @psd->config with @config on success.
+ */
+ int (*set_config)(struct psp_dev *psd, struct psp_dev_config *conf,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @key_rotate: rotate the device key
+ */
+ int (*key_rotate)(struct psp_dev *psd, struct netlink_ext_ack *extack);
+
+ /**
+ * @rx_spi_alloc: allocate an Rx SPI+key pair
+ * Allocate an Rx SPI and resulting derived key.
+ * This key should remain valid until key rotation.
+ */
+ int (*rx_spi_alloc)(struct psp_dev *psd, u32 version,
+ struct psp_key_parsed *assoc,
+ struct netlink_ext_ack *extack);
+
+ /**
+ * @tx_key_add: add a Tx key to the device
+ * Install an association in the device. Core will allocate space
+ * for the driver to use at drv_data.
+ */
+ int (*tx_key_add)(struct psp_dev *psd, struct psp_assoc *pas,
+ struct netlink_ext_ack *extack);
+ /**
+ * @tx_key_del: remove a Tx key from the device
+ * Remove an association from the device.
+ */
+ void (*tx_key_del)(struct psp_dev *psd, struct psp_assoc *pas);
+};
+
+#endif /* __NET_PSP_H */
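A driver-side registration sketch; the foo_* names, the caps contents, and the ERR_PTR-style return are assumptions, only the psp_dev_create() prototype from functions.h above is given:

    static struct psp_dev_caps foo_psp_caps = {
        .versions      = BIT(0),  /* assumed: one bit per supported PSP version */
        .assoc_drv_spc = sizeof(struct foo_psp_assoc_state),
    };

    psd = psp_dev_create(netdev, &foo_psp_ops, &foo_psp_caps, priv);
    if (IS_ERR(psd))
        return PTR_ERR(psd);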
diff --git a/include/net/raw.h b/include/net/raw.h
index 32a61481a253..66c0ffeada2e 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -81,6 +81,7 @@ struct raw_sock {
struct inet_sock inet;
struct icmp_filter filter;
u32 ipmr_table;
+ struct numa_drop_counters drop_counters;
};
#define raw_sk(ptr) container_of_const(ptr, struct raw_sock, inet.sk)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 6a5ec1418e85..cd4d4cf71d0d 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -185,8 +185,8 @@ struct fastopen_queue {
struct request_sock_queue {
spinlock_t rskq_lock;
u8 rskq_defer_accept;
+ u8 synflood_warned;
- u32 synflood_warned;
atomic_t qlen;
atomic_t young;
diff --git a/include/net/route.h b/include/net/route.h
index 7ea840daa775..f90106f383c5 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -189,7 +189,7 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
{
struct flowi4 fl4 = {
.flowi4_oif = oif,
- .flowi4_tos = inet_dscp_to_dsfield(dscp),
+ .flowi4_dscp = dscp,
.flowi4_scope = scope,
.daddr = daddr,
.saddr = saddr,
@@ -390,7 +390,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
const struct net *net;
rcu_read_lock();
- net = dev_net_rcu(dst_dev(dst));
+ net = dst_dev_net_rcu(dst);
hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
rcu_read_unlock();
}
diff --git a/include/net/rps.h b/include/net/rps.h
index d8ab3a08bcc4..f1794cd2e7fb 100644
--- a/include/net/rps.h
+++ b/include/net/rps.h
@@ -25,13 +25,16 @@ struct rps_map {
/*
* The rps_dev_flow structure contains the mapping of a flow to a CPU, the
- * tail pointer for that CPU's input queue at the time of last enqueue, and
- * a hardware filter index.
+ * tail pointer for that CPU's input queue at the time of last enqueue, a
+ * hardware filter index, and the hash of the flow if aRFS is enabled.
*/
struct rps_dev_flow {
u16 cpu;
u16 filter;
unsigned int last_qtail;
+#ifdef CONFIG_RFS_ACCEL
+ u32 hash;
+#endif
};
#define RPS_NO_FILTER 0xffff
@@ -82,11 +85,8 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
WRITE_ONCE(table->ents[index], val);
}
-#endif /* CONFIG_RPS */
-
-static inline void sock_rps_record_flow_hash(__u32 hash)
+static inline void _sock_rps_record_flow_hash(__u32 hash)
{
-#ifdef CONFIG_RPS
struct rps_sock_flow_table *sock_flow_table;
if (!hash)
@@ -96,42 +96,33 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
if (sock_flow_table)
rps_record_sock_flow(sock_flow_table, hash);
rcu_read_unlock();
-#endif
}
-static inline void sock_rps_record_flow(const struct sock *sk)
+static inline void _sock_rps_record_flow(const struct sock *sk)
{
-#ifdef CONFIG_RPS
- if (static_branch_unlikely(&rfs_needed)) {
- /* Reading sk->sk_rxhash might incur an expensive cache line
- * miss.
- *
- * TCP_ESTABLISHED does cover almost all states where RFS
- * might be useful, and is cheaper [1] than testing :
- * IPv4: inet_sk(sk)->inet_daddr
- * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
- * OR an additional socket flag
- * [1] : sk_state and sk_prot are in the same cache line.
+ /* Reading sk->sk_rxhash might incur an expensive cache line
+ * miss.
+ *
+ * TCP_ESTABLISHED does cover almost all states where RFS
+ * might be useful, and is cheaper [1] than testing :
+ * IPv4: inet_sk(sk)->inet_daddr
+ * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
+ * OR an additional socket flag
+ * [1] : sk_state and sk_prot are in the same cache line.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED) {
+ /* This READ_ONCE() is paired with the WRITE_ONCE()
+ * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
*/
- if (sk->sk_state == TCP_ESTABLISHED) {
- /* This READ_ONCE() is paired with the WRITE_ONCE()
- * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
- */
- sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
- }
+ _sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
}
-#endif
}
-static inline void sock_rps_delete_flow(const struct sock *sk)
+static inline void _sock_rps_delete_flow(const struct sock *sk)
{
-#ifdef CONFIG_RPS
struct rps_sock_flow_table *table;
u32 hash, index;
- if (!static_branch_unlikely(&rfs_needed))
- return;
-
hash = READ_ONCE(sk->sk_rxhash);
if (!hash)
return;
@@ -144,6 +135,45 @@ static inline void sock_rps_delete_flow(const struct sock *sk)
WRITE_ONCE(table->ents[index], RPS_NO_CPU);
}
rcu_read_unlock();
+}
+#endif /* CONFIG_RPS */
+
+static inline bool rfs_is_needed(void)
+{
+#ifdef CONFIG_RPS
+ return static_branch_unlikely(&rfs_needed);
+#else
+ return false;
+#endif
+}
+
+static inline void sock_rps_record_flow_hash(__u32 hash)
+{
+#ifdef CONFIG_RPS
+ if (!rfs_is_needed())
+ return;
+
+ _sock_rps_record_flow_hash(hash);
+#endif
+}
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (!rfs_is_needed())
+ return;
+
+ _sock_rps_record_flow(sk);
+#endif
+}
+
+static inline void sock_rps_delete_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+ if (!rfs_is_needed())
+ return;
+
+ _sock_rps_delete_flow(sk);
#endif
}
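The refactor splits the static-branch test (rfs_is_needed()) from the per-socket work (the _sock_rps_*() helpers), while the public wrappers behave as before. An illustrative caller that benefits from testing the branch once, name hypothetical:

    static void record_flows(struct sock **sks, int n)
    {
        int i;

        if (!rfs_is_needed())
            return;          /* skip the loop entirely when RFS is off */

        for (i = 0; i < n; i++)
            sock_rps_record_flow(sks[i]);
    }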
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index d4b3b2dcd15b..3d5879e08e78 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -22,16 +22,11 @@ struct sctp_endpoint;
struct sctp_association;
struct sctp_authkey;
struct sctp_hmacalgo;
-struct crypto_shash;
-/*
- * Define a generic struct that will hold all the info
- * necessary for an HMAC transform
- */
+/* Defines an HMAC algorithm supported by SCTP chunk authentication */
struct sctp_hmac {
- __u16 hmac_id; /* one of the above ids */
- char *hmac_name; /* name for loading */
- __u16 hmac_len; /* length of the signature */
+ __u16 hmac_id; /* one of SCTP_AUTH_HMAC_ID_* */
+ __u16 hmac_len; /* length of the HMAC value in bytes */
};
/* This is a generic structure that contains authentication bytes used
@@ -78,9 +73,9 @@ int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
struct sctp_association *asoc,
gfp_t gfp);
int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
-void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
-struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
-struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
+const struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
+const struct sctp_hmac *
+sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
struct sctp_hmac_algo_param *hmacs);
int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc,
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 5859e0a16a58..ae3376ba0b99 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -296,9 +296,8 @@ enum { SCTP_MAX_GABS = 16 };
*/
#define SCTP_DEFAULT_MINSEGMENT 512 /* MTU size ... if no mtu disc */
-#define SCTP_SECRET_SIZE 32 /* Number of octets in a 256 bits. */
-
-#define SCTP_SIGNATURE_SIZE 20 /* size of a SLA-1 signature */
+#define SCTP_COOKIE_KEY_SIZE 32 /* size of cookie HMAC key */
+#define SCTP_COOKIE_MAC_SIZE 32 /* size of HMAC field in cookies */
#define SCTP_COOKIE_MULTIPLE 32 /* Pad out our cookie to make our hash
* functions simpler to write.
@@ -417,16 +416,12 @@ enum {
SCTP_AUTH_HMAC_ID_RESERVED_0,
SCTP_AUTH_HMAC_ID_SHA1,
SCTP_AUTH_HMAC_ID_RESERVED_2,
-#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
SCTP_AUTH_HMAC_ID_SHA256,
-#endif
__SCTP_AUTH_HMAC_MAX
};
#define SCTP_AUTH_HMAC_ID_MAX __SCTP_AUTH_HMAC_MAX - 1
#define SCTP_AUTH_NUM_HMACS __SCTP_AUTH_HMAC_MAX
-#define SCTP_SHA1_SIG_SIZE 20
-#define SCTP_SHA256_SIG_SIZE 32
/* SCTP-AUTH, Section 3.2
* The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH chunks
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 8a540ad9b509..2ae390219efd 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -32,6 +32,7 @@
#ifndef __sctp_structs_h__
#define __sctp_structs_h__
+#include <crypto/sha2.h>
#include <linux/ktime.h>
#include <linux/generic-radix-tree.h>
#include <linux/rhashtable-types.h>
@@ -68,7 +69,6 @@ struct sctp_outq;
struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_ep_common;
-struct crypto_shash;
struct sctp_stream;
@@ -155,10 +155,6 @@ struct sctp_sock {
/* PF_ family specific functions. */
struct sctp_pf *pf;
- /* Access to HMAC transform. */
- struct crypto_shash *hmac;
- char *sctp_hmac_alg;
-
/* What is our base endpointer? */
struct sctp_endpoint *ep;
@@ -227,7 +223,8 @@ struct sctp_sock {
frag_interleave:1,
recvrcvinfo:1,
recvnxtinfo:1,
- data_ready_signalled:1;
+ data_ready_signalled:1,
+ cookie_auth_enable:1;
atomic_t pd_mode;
@@ -335,7 +332,7 @@ struct sctp_cookie {
/* The format of our cookie that we send to our peer. */
struct sctp_signed_cookie {
- __u8 signature[SCTP_SECRET_SIZE];
+ __u8 mac[SCTP_COOKIE_MAC_SIZE];
__u32 __pad; /* force sctp_cookie alignment to 64 bits */
struct sctp_cookie c;
} __packed;
@@ -1307,33 +1304,15 @@ struct sctp_endpoint {
/* This is really a list of struct sctp_association entries. */
struct list_head asocs;
- /* Secret Key: A secret key used by this endpoint to compute
- * the MAC. This SHOULD be a cryptographic quality
- * random number with a sufficient length.
- * Discussion in [RFC1750] can be helpful in
- * selection of the key.
- */
- __u8 secret_key[SCTP_SECRET_SIZE];
-
- /* digest: This is a digest of the sctp cookie. This field is
- * only used on the receive path when we try to validate
- * that the cookie has not been tampered with. We put
- * this here so we pre-allocate this once and can re-use
- * on every receive.
- */
- __u8 *digest;
-
+ /* Cookie authentication key used by this endpoint */
+ struct hmac_sha256_key cookie_auth_key;
+
/* sendbuf acct. policy. */
__u32 sndbuf_policy;
/* rcvbuf acct. policy. */
__u32 rcvbuf_policy;
- /* SCTP AUTH: array of the HMACs that will be allocated
- * we need this per association so that we don't serialize
- */
- struct crypto_shash **auth_hmacs;
-
/* SCTP-AUTH: hmacs for the endpoint encoded into parameter */
struct sctp_hmac_algo_param *auth_hmacs_list;
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 24f733b3e3fe..e9f41725933e 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -9,6 +9,8 @@
#ifndef _NET_SEG6_HMAC_H
#define _NET_SEG6_HMAC_H
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
@@ -19,7 +21,6 @@
#include <linux/seg6_hmac.h>
#include <linux/rhashtable-types.h>
-#define SEG6_HMAC_MAX_DIGESTSIZE 160
#define SEG6_HMAC_RING_SIZE 256
struct seg6_hmac_info {
@@ -27,16 +28,15 @@ struct seg6_hmac_info {
struct rcu_head rcu;
u32 hmackeyid;
+ /* The raw key, kept only so it can be returned to userspace */
char secret[SEG6_HMAC_SECRET_LEN];
u8 slen;
u8 alg_id;
-};
-
-struct seg6_hmac_algo {
- u8 alg_id;
- char name[64];
- struct crypto_shash * __percpu *tfms;
- struct shash_desc * __percpu *shashs;
+ /* The prepared key, which the calculations actually use */
+ union {
+ struct hmac_sha1_key sha1;
+ struct hmac_sha256_key sha256;
+ } key;
};
extern int seg6_hmac_compute(struct seg6_hmac_info *hinfo,
@@ -50,13 +50,9 @@ extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr,
struct ipv6_sr_hdr *srh);
extern bool seg6_hmac_validate_skb(struct sk_buff *skb);
#ifdef CONFIG_IPV6_SEG6_HMAC
-extern int seg6_hmac_init(void);
-extern void seg6_hmac_exit(void);
extern int seg6_hmac_net_init(struct net *net);
extern void seg6_hmac_net_exit(struct net *net);
#else
-static inline int seg6_hmac_init(void) { return 0; }
-static inline void seg6_hmac_exit(void) {}
static inline int seg6_hmac_net_init(struct net *net) { return 0; }
static inline void seg6_hmac_net_exit(struct net *net) {}
#endif
diff --git a/include/net/smc.h b/include/net/smc.h
index db84e4e35080..08bee529ed8d 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -15,7 +15,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
-#include "linux/ism.h"
+#include <linux/dibs.h>
struct sock;
@@ -27,62 +27,15 @@ struct smc_hashinfo {
};
/* SMCD/ISM device driver interface */
-struct smcd_dmb {
- u64 dmb_tok;
- u64 rgid;
- u32 dmb_len;
- u32 sba_idx;
- u32 vlan_valid;
- u32 vlan_id;
- void *cpu_addr;
- dma_addr_t dma_addr;
-};
-
-#define ISM_EVENT_DMB 0
-#define ISM_EVENT_GID 1
-#define ISM_EVENT_SWR 2
-
#define ISM_RESERVED_VLANID 0x1FFF
-#define ISM_ERROR 0xFFFF
-
-struct smcd_dev;
-
struct smcd_gid {
u64 gid;
u64 gid_ext;
};
-struct smcd_ops {
- int (*query_remote_gid)(struct smcd_dev *dev, struct smcd_gid *rgid,
- u32 vid_valid, u32 vid);
- int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb,
- void *client);
- int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
- int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data,
- unsigned int size);
- int (*supports_v2)(void);
- void (*get_local_gid)(struct smcd_dev *dev, struct smcd_gid *gid);
- u16 (*get_chid)(struct smcd_dev *dev);
- struct device* (*get_dev)(struct smcd_dev *dev);
-
- /* optional operations */
- int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
- int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
- int (*set_vlan_required)(struct smcd_dev *dev);
- int (*reset_vlan_required)(struct smcd_dev *dev);
- int (*signal_event)(struct smcd_dev *dev, struct smcd_gid *rgid,
- u32 trigger_irq, u32 event_code, u64 info);
- int (*support_dmb_nocopy)(struct smcd_dev *dev);
- int (*attach_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
- int (*detach_dmb)(struct smcd_dev *dev, u64 token);
-};
-
struct smcd_dev {
- const struct smcd_ops *ops;
- void *priv;
- void *client;
+ struct dibs_dev *dibs;
struct list_head list;
spinlock_t lock;
struct smc_connection **conn;
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 4cb4326dfebe..584e70742e9b 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -36,11 +36,6 @@ struct snmp_mib {
.entry = _entry, \
}
-#define SNMP_MIB_SENTINEL { \
- .name = NULL, \
- .entry = 0, \
-}
-
/*
* We use unsigned longs for most mibs but u64 for ipstats.
*/
diff --git a/include/net/sock.h b/include/net/sock.h
index 2e14283c5be1..60bcb13f045c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -249,6 +249,7 @@ struct sk_filter;
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
+ * @psp_assoc: PSP association, if socket is PSP-secured
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -282,6 +283,7 @@ struct sk_filter;
* @sk_err_soft: errors that don't cause failure but are the cause of a
* persistent failure not just 'timed out'
* @sk_drops: raw/udp drops counter
+ * @sk_drop_counters: optional pointer to numa_drop_counters
* @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
@@ -444,10 +446,15 @@ struct sock {
__cacheline_group_begin(sock_read_rxtx);
int sk_err;
struct socket *sk_socket;
+#ifdef CONFIG_MEMCG
struct mem_cgroup *sk_memcg;
+#endif
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
+#if IS_ENABLED(CONFIG_INET_PSP)
+ struct psp_assoc __rcu *psp_assoc;
+#endif
__cacheline_group_end(sock_read_rxtx);
__cacheline_group_begin(sock_write_rxtx);
@@ -460,7 +467,7 @@ struct sock {
__cacheline_group_begin(sock_write_tx);
int sk_write_pending;
atomic_t sk_omem_alloc;
- int sk_sndbuf;
+ int sk_err_soft;
int sk_wmem_queued;
refcount_t sk_wmem_alloc;
@@ -485,6 +492,9 @@ struct sock {
long sk_sndtimeo;
u32 sk_priority;
u32 sk_mark;
+ kuid_t sk_uid;
+ u16 sk_protocol;
+ u16 sk_type;
struct dst_entry __rcu *sk_dst_cache;
netdev_features_t sk_route_caps;
#ifdef CONFIG_SOCK_VALIDATE_XMIT
@@ -497,6 +507,7 @@ struct sock {
unsigned int sk_gso_max_size;
gfp_t sk_allocation;
u32 sk_txhash;
+ int sk_sndbuf;
u8 sk_pacing_shift;
bool sk_use_task_frag;
__cacheline_group_end(sock_read_tx);
@@ -510,15 +521,11 @@ struct sock {
sk_no_check_tx : 1,
sk_no_check_rx : 1;
u8 sk_shutdown;
- u16 sk_type;
- u16 sk_protocol;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
- int sk_err_soft;
u32 sk_ack_backlog;
u32 sk_max_ack_backlog;
- kuid_t sk_uid;
unsigned long sk_ino;
spinlock_t sk_peer_lock;
int sk_bind_phc;
@@ -564,6 +571,7 @@ struct sock {
#ifdef CONFIG_BPF_SYSCALL
struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
+ struct numa_drop_counters *sk_drop_counters;
struct rcu_head sk_rcu;
netns_tracker ns_tracker;
struct xarray sk_user_frags;
@@ -1346,8 +1354,6 @@ struct proto {
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- unsigned int __percpu *orphan_count;
-
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@ -1488,6 +1494,10 @@ static inline int __sk_prot_rehash(struct sock *sk)
#define SOCK_BINDADDR_LOCK 4
#define SOCK_BINDPORT_LOCK 8
+/**
+ * define SOCK_CONNECT_BIND - &sock->sk_userlocks flag for auto-bind at connect() time
+ */
+#define SOCK_CONNECT_BIND 16
struct socket_alloc {
struct socket socket;
@@ -2604,6 +2614,50 @@ static inline gfp_t gfp_memcg_charge(void)
return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
+{
+ return sk->sk_memcg;
+}
+
+static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
+{
+ return mem_cgroup_sockets_enabled && mem_cgroup_from_sk(sk);
+}
+
+static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_sk(sk);
+
+#ifdef CONFIG_MEMCG_V1
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return !!memcg->tcpmem_pressure;
+#endif /* CONFIG_MEMCG_V1 */
+
+ do {
+ if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg)))
+ return true;
+ } while ((memcg = parent_mem_cgroup(memcg)));
+
+ return false;
+}
+#else
+static inline struct mem_cgroup *mem_cgroup_from_sk(const struct sock *sk)
+{
+ return NULL;
+}
+
+static inline bool mem_cgroup_sk_enabled(const struct sock *sk)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_sk_under_memory_pressure(const struct sock *sk)
+{
+ return false;
+}
+#endif
+
static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
{
return noblock ? 0 : READ_ONCE(sk->sk_rcvtimeo);
@@ -2646,18 +2700,53 @@ struct sock_skb_cb {
#define sock_skb_cb_check_size(size) \
BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
+static inline void sk_drops_add(struct sock *sk, int segs)
+{
+ struct numa_drop_counters *ndc = sk->sk_drop_counters;
+
+ if (ndc)
+ numa_drop_add(ndc, segs);
+ else
+ atomic_add(segs, &sk->sk_drops);
+}
+
+static inline void sk_drops_inc(struct sock *sk)
+{
+ sk_drops_add(sk, 1);
+}
+
+static inline int sk_drops_read(const struct sock *sk)
+{
+ const struct numa_drop_counters *ndc = sk->sk_drop_counters;
+
+ if (ndc) {
+ DEBUG_NET_WARN_ON_ONCE(atomic_read(&sk->sk_drops));
+ return numa_drop_read(ndc);
+ }
+ return atomic_read(&sk->sk_drops);
+}
+
+static inline void sk_drops_reset(struct sock *sk)
+{
+ struct numa_drop_counters *ndc = sk->sk_drop_counters;
+
+ if (ndc)
+ numa_drop_reset(ndc);
+ atomic_set(&sk->sk_drops, 0);
+}
+
static inline void
sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
{
SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
- atomic_read(&sk->sk_drops) : 0;
+ sk_drops_read(sk) : 0;
}
-static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+static inline void sk_drops_skbadd(struct sock *sk, const struct sk_buff *skb)
{
int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
- atomic_add(segs, &sk->sk_drops);
+ sk_drops_add(sk, segs);
}
static inline ktime_t sock_read_timestamp(struct sock *sk)
@@ -2876,28 +2965,6 @@ sk_requests_wifi_status(struct sock *sk)
return sk && sk_fullsock(sk) && sock_flag(sk, SOCK_WIFI_STATUS);
}
-/* Checks if this SKB belongs to an HW offloaded socket
- * and whether any SW fallbacks are required based on dev.
- * Check decrypted mark in case skb_orphan() cleared socket.
- */
-static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
- struct net_device *dev)
-{
-#ifdef CONFIG_SOCK_VALIDATE_XMIT
- struct sock *sk = skb->sk;
-
- if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
- skb = sk->sk_validate_xmit_skb(sk, dev, skb);
- } else if (unlikely(skb_is_decrypted(skb))) {
- pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
- kfree_skb(skb);
- skb = NULL;
- }
-#endif
-
- return skb;
-}
-
/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
* SYNACK messages can be attached to either ones (depending on SYNCOOKIE)
*/
@@ -2934,8 +3001,8 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
*/
#define _SK_MEM_PACKETS 256
#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
-#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
-#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_WMEM_DEFAULT (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_RMEM_DEFAULT (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
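For illustration, a minimal sketch of how a receive path might use the new drop-accounting helpers instead of touching sk->sk_drops directly; the queueing function and its caller here are hypothetical, not part of the patch:

/* Hypothetical receive-path sketch, assuming the sk_drops_* helpers above. */
static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		/* One drop per GSO segment; routed to the per-NUMA
		 * counters when sk->sk_drop_counters is set, else to
		 * the legacy atomic sk->sk_drops.
		 */
		sk_drops_skbadd(sk, skb);
		kfree_skb(skb);
		return -ENOBUFS;
	}
	return 0;
}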
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
index 7c240d2fed4e..626704cd6241 100644
--- a/include/net/tc_act/tc_skbmod.h
+++ b/include/net/tc_act/tc_skbmod.h
@@ -12,6 +12,7 @@
struct tcf_skbmod_params {
struct rcu_head rcu;
u64 flags; /*up to 64 types of operations; extend if needed */
+ int action;
u8 eth_dst[ETH_ALEN];
u16 eth_type;
u8 eth_src[ETH_ALEN];
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index 879fe8cff581..0f1925f97520 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -14,6 +14,7 @@
struct tcf_tunnel_key_params {
struct rcu_head rcu;
int tcft_action;
+ int action;
struct metadata_dst *tcft_enc_metadata;
};
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 3f5e9242b5e8..beadee41669a 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -10,6 +10,7 @@
#include <linux/tc_act/tc_vlan.h>
struct tcf_vlan_params {
+ int action;
int tcfv_action;
unsigned char tcfv_push_dst[ETH_ALEN];
unsigned char tcfv_push_src[ETH_ALEN];
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 526a26e7a150..5ca230ed526a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -54,6 +54,16 @@ extern struct inet_hashinfo tcp_hashinfo;
DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);
+static inline void tcp_orphan_count_inc(void)
+{
+ this_cpu_inc(tcp_orphan_count);
+}
+
+static inline void tcp_orphan_count_dec(void)
+{
+ this_cpu_dec(tcp_orphan_count);
+}
+
DECLARE_PER_CPU(u32, tcp_tw_isn);
void tcp_time_wait(struct sock *sk, int state, int timeo);
@@ -90,6 +100,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE 14U
+/* Default sending frequency of accurate ECN option per RTT */
+#define TCP_ACCECN_OPTION_BEACON 3
+
/* urg_data states */
#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
@@ -203,6 +216,8 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
+#define TCPOPT_ACCECN0 172 /* 0xAC: Accurate ECN Order 0 */
+#define TCPOPT_ACCECN1 174 /* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
* experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -220,6 +235,7 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_FASTOPEN_BASE 2
+#define TCPOLEN_ACCECN_BASE 2
#define TCPOLEN_EXP_FASTOPEN_BASE 4
#define TCPOLEN_EXP_SMC_BASE 6
@@ -233,6 +249,14 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
#define TCPOLEN_MD5SIG_ALIGNED 20
#define TCPOLEN_MSS_ALIGNED 4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
+#define TCPOLEN_ACCECN_PERFIELD 3
+
+/* Maximum number of byte counters in AccECN option + size */
+#define TCP_ACCECN_NUMFIELDS 3
+#define TCP_ACCECN_MAXSIZE (TCPOLEN_ACCECN_BASE + \
+ TCPOLEN_ACCECN_PERFIELD * \
+ TCP_ACCECN_NUMFIELDS)
+#define TCP_ACCECN_SAFETY_SHIFT 1 /* SAFETY_FACTOR in accecn draft */
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
@@ -275,8 +299,8 @@ extern unsigned long tcp_memory_pressure;
/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
- if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ if (mem_cgroup_sk_enabled(sk) &&
+ mem_cgroup_sk_under_memory_pressure(sk))
return true;
return READ_ONCE(tcp_memory_pressure);
@@ -346,6 +370,7 @@ void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
+void tcp_rcvbuf_grow(struct sock *sk);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
@@ -811,33 +836,6 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
-static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
-{
- /* mptcp hooks are only on the slow path */
- if (sk_is_mptcp((struct sock *)tp))
- return;
-
- tp->pred_flags = htonl((tp->tcp_header_len << 26) |
- ntohl(TCP_FLAG_ACK) |
- snd_wnd);
-}
-
-static inline void tcp_fast_path_on(struct tcp_sock *tp)
-{
- __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
-}
-
-static inline void tcp_fast_path_check(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
- tp->rcv_wnd &&
- atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
- !tp->urg_data)
- tcp_fast_path_on(tp);
-}
-
u32 tcp_delack_max(const struct sock *sk);
/* Compute the actual rto_min value */
@@ -989,6 +987,18 @@ static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+#define TCPHDR_SYNACK_ACCECN (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)
+
+#define TCP_ACCECN_CEP_ACE_MASK 0x7
+#define TCP_ACCECN_ACE_MAX_DELTA 6
+
+/* To avoid/detect middlebox interference, not all counters start at 0.
+ * See draft-ietf-tcpm-accurate-ecn for the latest values.
+ */
+#define TCP_ACCECN_CEP_INIT_OFFSET 5
+#define TCP_ACCECN_E1B_INIT_OFFSET 1
+#define TCP_ACCECN_E0B_INIT_OFFSET 1
+#define TCP_ACCECN_CEB_INIT_OFFSET 0
/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
@@ -1797,6 +1807,40 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
return true;
}
+static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
+{
+ u32 ace;
+
+ /* mptcp hooks are only on the slow path */
+ if (sk_is_mptcp((struct sock *)tp))
+ return;
+
+ ace = tcp_ecn_mode_accecn(tp) ?
+ ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
+ TCP_ACCECN_CEP_ACE_MASK) : 0;
+
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ (ace << 22) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
+}
+
+static inline void tcp_fast_path_on(struct tcp_sock *tp)
+{
+ __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
+}
+
+static inline void tcp_fast_path_check(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
+ tp->rcv_wnd &&
+ atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+ !tp->urg_data)
+ tcp_fast_path_on(tp);
+}
+
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
int mib_idx, u32 *last_oow_ack_time);
@@ -1931,6 +1975,7 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
}
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
+void tcp_md5_destruct_sock(struct sock *sk);
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
@@ -1947,6 +1992,9 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
}
#define tcp_twsk_md5_key(twsk) NULL
+static inline void tcp_md5_destruct_sock(struct sock *sk)
+{
+}
#endif
int tcp_md5_alloc_sigpool(void);
@@ -2612,7 +2660,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
*/
static inline void tcp_listendrop(const struct sock *sk)
{
- atomic_inc(&((struct sock *)sk)->sk_drops);
+ sk_drops_inc((struct sock *)sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}
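As a sanity check on the AccECN option sizing above: with a 2-byte base (kind + length) and three 3-byte counter fields, TCP_ACCECN_MAXSIZE works out to 2 + 3 * 3 = 11 bytes. A compile-time check of that arithmetic (illustrative only, not part of the patch) could be:

/* Illustrative only: verify the AccECN option size arithmetic. */
static_assert(TCP_ACCECN_MAXSIZE ==
	      TCPOLEN_ACCECN_BASE +
	      TCPOLEN_ACCECN_PERFIELD * TCP_ACCECN_NUMFIELDS);
static_assert(TCP_ACCECN_MAXSIZE == 11);	/* 2 + 3 * 3 */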
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index df655ce6987d..1e9e27d6e06b 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -130,7 +130,6 @@ struct tcp_ao_info {
u32 snd_sne;
u32 rcv_sne;
refcount_t refcnt; /* Protects twsk destruction */
- struct rcu_head rcu;
};
#ifdef CONFIG_TCP_MD5SIG
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
new file mode 100644
index 000000000000..f13e5cd2b1ac
--- /dev/null
+++ b/include/net/tcp_ecn.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _TCP_ECN_H
+#define _TCP_ECN_H
+
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/bitfield.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/inet_ecn.h>
+
+/* The highest ECN variant (Accurate ECN, ECN, or no ECN) that is
+ * attempted to be negotiated and requested for incoming and
+ * outgoing connections, respectively.
+ */
+enum tcp_ecn_mode {
+ TCP_ECN_IN_NOECN_OUT_NOECN = 0,
+ TCP_ECN_IN_ECN_OUT_ECN = 1,
+ TCP_ECN_IN_ECN_OUT_NOECN = 2,
+ TCP_ECN_IN_ACCECN_OUT_ACCECN = 3,
+ TCP_ECN_IN_ACCECN_OUT_ECN = 4,
+ TCP_ECN_IN_ACCECN_OUT_NOECN = 5,
+};
+
+/* AccECN option sending when AccECN has been successfully negotiated */
+enum tcp_accecn_option {
+ TCP_ACCECN_OPTION_DISABLED = 0,
+ TCP_ACCECN_OPTION_MINIMUM = 1,
+ TCP_ACCECN_OPTION_FULL = 2,
+};
+
+static inline void tcp_ecn_queue_cwr(struct tcp_sock *tp)
+{
+ /* Do not set CWR if in AccECN mode! */
+ if (tcp_ecn_mode_rfc3168(tp))
+ tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
+}
+
+static inline void tcp_ecn_accept_cwr(struct sock *sk,
+ const struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tcp_ecn_mode_rfc3168(tp) && tcp_hdr(skb)->cwr) {
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+
+ /* If the sender is telling us it has entered CWR, then its
+ * cwnd may be very low (even just 1 packet), so we should ACK
+ * immediately.
+ */
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ }
+}
+
+static inline void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
+{
+ tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
+}
+
+/* tp->accecn_fail_mode */
+#define TCP_ACCECN_ACE_FAIL_SEND BIT(0)
+#define TCP_ACCECN_ACE_FAIL_RECV BIT(1)
+#define TCP_ACCECN_OPT_FAIL_SEND BIT(2)
+#define TCP_ACCECN_OPT_FAIL_RECV BIT(3)
+
+static inline bool tcp_accecn_ace_fail_send(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_SEND;
+}
+
+static inline bool tcp_accecn_ace_fail_recv(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_ACE_FAIL_RECV;
+}
+
+static inline bool tcp_accecn_opt_fail_send(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_OPT_FAIL_SEND;
+}
+
+static inline bool tcp_accecn_opt_fail_recv(const struct tcp_sock *tp)
+{
+ return tp->accecn_fail_mode & TCP_ACCECN_OPT_FAIL_RECV;
+}
+
+static inline void tcp_accecn_fail_mode_set(struct tcp_sock *tp, u8 mode)
+{
+ tp->accecn_fail_mode |= mode;
+}
+
+#define TCP_ACCECN_OPT_NOT_SEEN 0x0
+#define TCP_ACCECN_OPT_EMPTY_SEEN 0x1
+#define TCP_ACCECN_OPT_COUNTER_SEEN 0x2
+#define TCP_ACCECN_OPT_FAIL_SEEN 0x3
+
+static inline u8 tcp_accecn_ace(const struct tcphdr *th)
+{
+ return (th->ae << 2) | (th->cwr << 1) | th->ece;
+}
+
+/* Infer the ECT value our SYN arrived with from the echoed ACE field */
+static inline int tcp_accecn_extract_syn_ect(u8 ace)
+{
+ /* Below is an excerpt from the 1st block of Table 2 of AccECN spec */
+ static const int ace_to_ecn[8] = {
+ INET_ECN_ECT_0, /* 0b000 (Undefined) */
+ INET_ECN_ECT_1, /* 0b001 (Undefined) */
+ INET_ECN_NOT_ECT, /* 0b010 (Not-ECT is received) */
+ INET_ECN_ECT_1, /* 0b011 (ECT-1 is received) */
+ INET_ECN_ECT_0, /* 0b100 (ECT-0 is received) */
+ INET_ECN_ECT_1, /* 0b101 (Reserved) */
+ INET_ECN_CE, /* 0b110 (CE is received) */
+ INET_ECN_ECT_1 /* 0b111 (Undefined) */
+ };
+
+ return ace_to_ecn[ace & 0x7];
+}
+
+/* Check ECN field transition to detect invalid transitions */
+static inline bool tcp_ect_transition_valid(u8 snt, u8 rcv)
+{
+ if (rcv == snt)
+ return true;
+
+ /* Non-ECT altered to something or something became non-ECT */
+ if (snt == INET_ECN_NOT_ECT || rcv == INET_ECN_NOT_ECT)
+ return false;
+ /* CE -> ECT(0/1)? */
+ if (snt == INET_ECN_CE)
+ return false;
+ return true;
+}
+
+static inline bool tcp_accecn_validate_syn_feedback(struct sock *sk, u8 ace,
+ u8 sent_ect)
+{
+ u8 ect = tcp_accecn_extract_syn_ect(ace);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
+ return true;
+
+ if (!tcp_ect_transition_valid(sent_ect, ect)) {
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV);
+ return false;
+ }
+
+ return true;
+}
+
+static inline void tcp_accecn_saw_opt_fail_recv(struct tcp_sock *tp,
+ u8 saw_opt)
+{
+ tp->saw_accecn_opt = saw_opt;
+ if (tp->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN)
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_OPT_FAIL_RECV);
+}
+
+/* Validate the 3rd ACK based on the ACE field, see Table 4 of AccECN spec */
+static inline void tcp_accecn_third_ack(struct sock *sk,
+ const struct sk_buff *skb, u8 sent_ect)
+{
+ u8 ace = tcp_accecn_ace(tcp_hdr(skb));
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ switch (ace) {
+ case 0x0:
+ /* Invalid value */
+ tcp_accecn_fail_mode_set(tp, TCP_ACCECN_ACE_FAIL_RECV);
+ break;
+ case 0x7:
+ case 0x5:
+ case 0x1:
+ /* Unused but legal values */
+ break;
+ default:
+ /* Validation only applies to the first non-data packet */
+ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq &&
+ !TCP_SKB_CB(skb)->sacked &&
+ tcp_accecn_validate_syn_feedback(sk, ace, sent_ect)) {
+ if ((tcp_accecn_extract_syn_ect(ace) == INET_ECN_CE) &&
+ !tp->delivered_ce)
+ tp->delivered_ce++;
+ }
+ break;
+ }
+}
+
+/* Demand the minimum # to send AccECN option */
+static inline void tcp_accecn_opt_demand_min(struct sock *sk,
+ u8 opt_demand_min)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u8 opt_demand;
+
+ opt_demand = max_t(u8, opt_demand_min, tp->accecn_opt_demand);
+ tp->accecn_opt_demand = opt_demand;
+}
+
+/* Maps IP ECN field ECT/CE code point to AccECN option field number, given
+ * we are sending fields with Accurate ECN Order 1: ECT(1), CE, ECT(0).
+ */
+static inline u8 tcp_ecnfield_to_accecn_optfield(u8 ecnfield)
+{
+ switch (ecnfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ return 0; /* AccECN does not send counts of NOT_ECT */
+ case INET_ECN_ECT_1:
+ return 1;
+ case INET_ECN_CE:
+ return 2;
+ case INET_ECN_ECT_0:
+ return 3;
+ }
+ return 0;
+}
+
+/* Maps IP ECN field ECT/CE code point to AccECN option field value offset.
+ * Some fields do not start from zero, to detect zeroing by middleboxes.
+ */
+static inline u32 tcp_accecn_field_init_offset(u8 ecnfield)
+{
+ switch (ecnfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
+ return 0; /* AccECN does not send counts of NOT_ECT */
+ case INET_ECN_ECT_1:
+ return TCP_ACCECN_E1B_INIT_OFFSET;
+ case INET_ECN_CE:
+ return TCP_ACCECN_CEB_INIT_OFFSET;
+ case INET_ECN_ECT_0:
+ return TCP_ACCECN_E0B_INIT_OFFSET;
+ }
+ return 0;
+}
+
+/* Maps AccECN option field #nr to IP ECN field ECT/CE bits */
+static inline unsigned int tcp_accecn_optfield_to_ecnfield(unsigned int option,
+ bool order)
+{
+ /* Based on Table 5 of the AccECN spec, mapping (option, order) to
+ * the corresponding ECN counters (ECT-1, ECT-0, or CE).
+ */
+ static const u8 optfield_lookup[2][3] = {
+ /* order = 0: 1st field ECT-0, 2nd field CE, 3rd field ECT-1 */
+ { INET_ECN_ECT_0, INET_ECN_CE, INET_ECN_ECT_1 },
+ /* order = 1: 1st field ECT-1, 2nd field CE, 3rd field ECT-0 */
+ { INET_ECN_ECT_1, INET_ECN_CE, INET_ECN_ECT_0 }
+ };
+
+ return optfield_lookup[order][option % 3];
+}
+
+/* Handles the AccECN option's ECT and CE 24-bit byte counter updates into
+ * the u32 values in tcp_sock. As we're processing TCP options, it is
+ * safe to access the byte at from - 1.
+ */
+static inline s32 tcp_update_ecn_bytes(u32 *cnt, const char *from,
+ u32 init_offset)
+{
+ u32 truncated = (get_unaligned_be32(from - 1) - init_offset) &
+ 0xFFFFFFU;
+ u32 delta = (truncated - *cnt) & 0xFFFFFFU;
+
+ /* If delta has the highest bit set (24th bit), the difference is
+ * negative; sign extend to correct the estimate using
+ * sign_extend32(delta, 24 - 1).
+ */
+ delta = sign_extend32(delta, 23);
+ *cnt += delta;
+ return (s32)delta;
+}
+
+/* Updates Accurate ECN received counters from the received IP ECN field */
+static inline void tcp_ecn_received_counters(struct sock *sk,
+ const struct sk_buff *skb, u32 len)
+{
+ u8 ecnfield = TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK;
+ u8 is_ce = INET_ECN_is_ce(ecnfield);
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool ecn_edge;
+
+ if (!INET_ECN_is_not_ect(ecnfield)) {
+ u32 pcount = is_ce * max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ /* For Accurate ECN, the TCP_ECN_SEEN flag is set by
+ * tcp_ecn_received_counters() when the ECN codepoint of a
+ * received TCP data segment or ACK is ECT(0), ECT(1), or CE.
+ */
+ if (!tcp_ecn_mode_rfc3168(tp))
+ tp->ecn_flags |= TCP_ECN_SEEN;
+
+ /* ACE counter tracks *all* segments including pure ACKs */
+ tp->received_ce += pcount;
+ tp->received_ce_pending = min(tp->received_ce_pending + pcount,
+ 0xfU);
+
+ if (len > 0) {
+ u8 minlen = tcp_ecnfield_to_accecn_optfield(ecnfield);
+ u32 oldbytes = tp->received_ecn_bytes[ecnfield - 1];
+ u32 bytes_mask = GENMASK_U32(31, 22);
+
+ tp->received_ecn_bytes[ecnfield - 1] += len;
+ tp->accecn_minlen = max_t(u8, tp->accecn_minlen,
+ minlen);
+
+ /* Send AccECN option at least once per 2^22-byte
+ * increase in any ECN byte counter.
+ */
+ if ((tp->received_ecn_bytes[ecnfield - 1] ^ oldbytes) &
+ bytes_mask) {
+ tcp_accecn_opt_demand_min(sk, 1);
+ }
+ }
+ }
+
+ ecn_edge = tp->prev_ecnfield != ecnfield;
+ if (ecn_edge || is_ce) {
+ tp->prev_ecnfield = ecnfield;
+ /* Demand Accurate ECN change-triggered ACKs. Two ACKs are
+ * demanded to indicate unambiguously the ecnfield value
+ * in the latter ACK.
+ */
+ if (tcp_ecn_mode_accecn(tp)) {
+ if (ecn_edge)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ tp->accecn_opt_demand = 2;
+ }
+ }
+}
+
+/* AccECN specification, 2.2: [...] A Data Receiver maintains four counters
+ * initialized at the start of the half-connection. [...] These byte counters
+ * reflect only the TCP payload length, excluding TCP header and TCP options.
+ */
+static inline void tcp_ecn_received_counters_payload(struct sock *sk,
+ const struct sk_buff *skb)
+{
+ const struct tcphdr *th = (const struct tcphdr *)skb->data;
+
+ tcp_ecn_received_counters(sk, skb, skb->len - th->doff * 4);
+}
+
+/* AccECN specification, 5.1: [...] a server can determine that it
+ * negotiated AccECN as [...] if the ACK contains an ACE field with
+ * the value 0b010 to 0b111 (decimal 2 to 7).
+ */
+static inline bool cookie_accecn_ok(const struct tcphdr *th)
+{
+ return tcp_accecn_ace(th) > 0x1;
+}
+
+/* Used to form the ACE flags for SYN/ACK */
+static inline u16 tcp_accecn_reflector_flags(u8 ect)
+{
+ /* TCP ACE flags of SYN/ACK are set based on IP-ECN received from SYN.
+ * Below is an excerpt from the 1st block of Table 2 of AccECN spec,
+ * in which TCP ACE flags are encoded as: (AE << 2) | (CWR << 1) | ECE
+ */
+ static const u8 ecn_to_ace_flags[4] = {
+ 0b010, /* Not-ECT is received */
+ 0b011, /* ECT(1) is received */
+ 0b100, /* ECT(0) is received */
+ 0b110 /* CE is received */
+ };
+
+ return FIELD_PREP(TCPHDR_ACE, ecn_to_ace_flags[ect & 0x3]);
+}
+
+/* AccECN specification, 3.1.2: If a TCP server that implements AccECN
+ * receives a SYN with the three TCP header flags (AE, CWR and ECE) set
+ * to any combination other than 000, 011 or 111, it MUST negotiate the
+ * use of AccECN as if they had been set to 111.
+ */
+static inline bool tcp_accecn_syn_requested(const struct tcphdr *th)
+{
+ u8 ace = tcp_accecn_ace(th);
+
+ return ace && ace != 0x3;
+}
+
+static inline void __tcp_accecn_init_bytes_counters(int *counter_array)
+{
+ BUILD_BUG_ON(INET_ECN_ECT_1 != 0x1);
+ BUILD_BUG_ON(INET_ECN_ECT_0 != 0x2);
+ BUILD_BUG_ON(INET_ECN_CE != 0x3);
+
+ counter_array[INET_ECN_ECT_1 - 1] = 0;
+ counter_array[INET_ECN_ECT_0 - 1] = 0;
+ counter_array[INET_ECN_CE - 1] = 0;
+}
+
+static inline void tcp_accecn_init_counters(struct tcp_sock *tp)
+{
+ tp->received_ce = 0;
+ tp->received_ce_pending = 0;
+ __tcp_accecn_init_bytes_counters(tp->received_ecn_bytes);
+ __tcp_accecn_init_bytes_counters(tp->delivered_ecn_bytes);
+ tp->accecn_minlen = 0;
+ tp->accecn_opt_demand = 0;
+ tp->est_ecnfield = 0;
+}
+
+/* Used for make_synack to form the ACE flags */
+static inline void tcp_accecn_echo_syn_ect(struct tcphdr *th, u8 ect)
+{
+ /* TCP ACE flags of SYN/ACK are set based on IP-ECN codepoint received
+ * from SYN. Below is an excerpt from Table 2 of the AccECN spec:
+ * +====================+====================================+
+ * | IP-ECN codepoint | Respective ACE flags on SYN/ACK |
+ * | received on SYN | AE CWR ECE |
+ * +====================+====================================+
+ * | Not-ECT | 0 1 0 |
+ * | ECT(1) | 0 1 1 |
+ * | ECT(0) | 1 0 0 |
+ * | CE | 1 1 0 |
+ * +====================+====================================+
+ */
+ th->ae = !!(ect & INET_ECN_ECT_0);
+ th->cwr = ect != INET_ECN_ECT_0;
+ th->ece = ect == INET_ECN_ECT_1;
+}
+
+static inline void tcp_accecn_set_ace(struct tcp_sock *tp, struct sk_buff *skb,
+ struct tcphdr *th)
+{
+ u32 wire_ace;
+
+ /* The final packet of the 3WHS or anything like it must reflect
+ * the SYN/ACK ECT instead of putting CEP into the ACE field; such
+ * cases show up in tcp_flags.
+ */
+ if (likely(!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACE))) {
+ wire_ace = tp->received_ce + TCP_ACCECN_CEP_INIT_OFFSET;
+ th->ece = !!(wire_ace & 0x1);
+ th->cwr = !!(wire_ace & 0x2);
+ th->ae = !!(wire_ace & 0x4);
+ tp->received_ce_pending = 0;
+ }
+}
+
+static inline u8 tcp_accecn_option_init(const struct sk_buff *skb,
+ u8 opt_offset)
+{
+ u8 *ptr = skb_transport_header(skb) + opt_offset;
+ unsigned int optlen = ptr[1] - 2;
+
+ if (WARN_ON_ONCE(ptr[0] != TCPOPT_ACCECN0 && ptr[0] != TCPOPT_ACCECN1))
+ return TCP_ACCECN_OPT_FAIL_SEEN;
+ ptr += 2;
+
+ /* Detect option zeroing: an AccECN connection "MAY check that the
+ * initial value of the EE0B field or the EE1B field is non-zero"
+ */
+ if (optlen < TCPOLEN_ACCECN_PERFIELD)
+ return TCP_ACCECN_OPT_EMPTY_SEEN;
+ if (get_unaligned_be24(ptr) == 0)
+ return TCP_ACCECN_OPT_FAIL_SEEN;
+ if (optlen < TCPOLEN_ACCECN_PERFIELD * 3)
+ return TCP_ACCECN_OPT_COUNTER_SEEN;
+ ptr += TCPOLEN_ACCECN_PERFIELD * 2;
+ if (get_unaligned_be24(ptr) == 0)
+ return TCP_ACCECN_OPT_FAIL_SEEN;
+
+ return TCP_ACCECN_OPT_COUNTER_SEEN;
+}
+
+/* See Table 2 of the AccECN draft */
+static inline void tcp_ecn_rcv_synack(struct sock *sk, const struct sk_buff *skb,
+ const struct tcphdr *th, u8 ip_dsfield)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u8 ace = tcp_accecn_ace(th);
+
+ switch (ace) {
+ case 0x0:
+ case 0x7:
+ /* +========+========+============+=============+
+ * | A | B | SYN/ACK | Feedback |
+ * | | | B->A | Mode of A |
+ * | | | AE CWR ECE | |
+ * +========+========+============+=============+
+ * | AccECN | No ECN | 0 0 0 | Not ECN |
+ * | AccECN | Broken | 1 1 1 | Not ECN |
+ * +========+========+============+=============+
+ */
+ tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
+ break;
+ case 0x1:
+ case 0x5:
+ /* +========+========+============+=============+
+ * | A | B | SYN/ACK | Feedback |
+ * | | | B->A | Mode of A |
+ * | | | AE CWR ECE | |
+ * +========+========+============+=============+
+ * | AccECN | Nonce | 1 0 1 | (Reserved) |
+ * | AccECN | ECN | 0 0 1 | Classic ECN |
+ * | Nonce | AccECN | 0 0 1 | Classic ECN |
+ * | ECN | AccECN | 0 0 1 | Classic ECN |
+ * +========+========+============+=============+
+ */
+ if (tcp_ecn_mode_pending(tp))
+ /* Downgrade from AccECN, or requested initially */
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
+ break;
+ default:
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
+ tp->syn_ect_rcv = ip_dsfield & INET_ECN_MASK;
+ if (tp->rx_opt.accecn &&
+ tp->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) {
+ u8 saw_opt = tcp_accecn_option_init(skb, tp->rx_opt.accecn);
+
+ tcp_accecn_saw_opt_fail_recv(tp, saw_opt);
+ tp->accecn_opt_demand = 2;
+ }
+ if (INET_ECN_is_ce(ip_dsfield) &&
+ tcp_accecn_validate_syn_feedback(sk, ace,
+ tp->syn_ect_snt)) {
+ tp->received_ce++;
+ tp->received_ce_pending++;
+ }
+ break;
+ }
+}
+
+static inline void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th,
+ const struct sk_buff *skb)
+{
+ if (tcp_ecn_mode_pending(tp)) {
+ if (!tcp_accecn_syn_requested(th)) {
+ /* Downgrade to classic ECN feedback */
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
+ } else {
+ tp->syn_ect_rcv = TCP_SKB_CB(skb)->ip_dsfield &
+ INET_ECN_MASK;
+ tp->prev_ecnfield = tp->syn_ect_rcv;
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
+ }
+ }
+ if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || !th->cwr))
+ tcp_ecn_mode_set(tp, TCP_ECN_DISABLED);
+}
+
+static inline bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp,
+ const struct tcphdr *th)
+{
+ if (th->ece && !th->syn && tcp_ecn_mode_rfc3168(tp))
+ return true;
+ return false;
+}
+
+/* Packet ECN state for a SYN-ACK */
+static inline void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
+ if (tcp_ecn_disabled(tp))
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
+ else if (tcp_ca_needs_ecn(sk) ||
+ tcp_bpf_ca_needs_ecn(sk))
+ INET_ECN_xmit(sk);
+
+ if (tp->ecn_flags & TCP_ECN_MODE_ACCECN) {
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ACE;
+ TCP_SKB_CB(skb)->tcp_flags |=
+ tcp_accecn_reflector_flags(tp->syn_ect_rcv);
+ tp->syn_ect_snt = inet_sk(sk)->tos & INET_ECN_MASK;
+ }
+}
+
+/* Packet ECN state for a SYN. */
+static inline void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
+ bool use_ecn, use_accecn;
+ u8 tcp_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
+
+ use_accecn = tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ACCECN;
+ use_ecn = tcp_ecn == TCP_ECN_IN_ECN_OUT_ECN ||
+ tcp_ecn == TCP_ECN_IN_ACCECN_OUT_ECN ||
+ tcp_ca_needs_ecn(sk) || bpf_needs_ecn || use_accecn;
+
+ if (!use_ecn) {
+ const struct dst_entry *dst = __sk_dst_get(sk);
+
+ if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
+ use_ecn = true;
+ }
+
+ tp->ecn_flags = 0;
+
+ if (use_ecn) {
+ if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
+ INET_ECN_xmit(sk);
+
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
+ if (use_accecn) {
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_AE;
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_PENDING);
+ tp->syn_ect_snt = inet_sk(sk)->tos & INET_ECN_MASK;
+ } else {
+ tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168);
+ }
+ }
+}
+
+static inline void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
+{
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)) {
+ /* tp->ecn_flags are cleared at a later point in time when
+ * the SYN ACK is ultimately received.
+ */
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ACE;
+ }
+}
+
+static inline void
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
+{
+ if (tcp_rsk(req)->accecn_ok)
+ tcp_accecn_echo_syn_ect(th, tcp_rsk(req)->syn_ect_rcv);
+ else if (inet_rsk(req)->ecn_ok)
+ th->ece = 1;
+}
+
+static inline bool tcp_accecn_option_beacon_check(const struct sock *sk)
+{
+ u32 ecn_beacon = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_option_beacon);
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ if (!ecn_beacon)
+ return false;
+
+ return tcp_stamp_us_delta(tp->tcp_mstamp, tp->accecn_opt_tstamp) * ecn_beacon >=
+ (tp->srtt_us >> 3);
+}
+
+#endif /* _TCP_ECN_H */
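To make the 24-bit wraparound handling in tcp_update_ecn_bytes() concrete, a worked example with illustrative values: with *cnt == 10 and a wire counter that decodes to truncated == 8, delta = (8 - 10) & 0xFFFFFF = 0xFFFFFE; bit 23 is set, so sign_extend32() turns it into -2 and the local counter is corrected downward rather than jumping forward by ~16M. A self-contained sketch of the same arithmetic:

/* Illustrative sketch of the 24-bit signed delta correction. */
static inline s32 example_accecn_delta(u32 cnt, u32 truncated)
{
	u32 delta = (truncated - cnt) & 0xFFFFFF;

	/* Interpret the 24-bit difference as a signed quantity. */
	return sign_extend32(delta, 23);
}
/* example_accecn_delta(10, 8) == -2; example_accecn_delta(10, 12) == 2 */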
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 62b3e9f2aed4..0a85ac64a66d 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,13 +15,6 @@ struct timewait_sock_ops {
struct kmem_cache *twsk_slab;
char *twsk_slab_name;
unsigned int twsk_obj_size;
- void (*twsk_destructor)(struct sock *sk);
};
-static inline void twsk_destructor(struct sock *sk)
-{
- if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
- sk->sk_prot->twsk_prot->twsk_destructor(sk);
-}
-
#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/udp.h b/include/net/udp.h
index e2af3bda90c9..cffedb3e40f2 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -284,14 +284,28 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
netdev_features_t features, bool is_ipv6);
-static inline void udp_lib_init_sock(struct sock *sk)
+static inline int udp_lib_init_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
+ sk->sk_drop_counters = &up->drop_counters;
skb_queue_head_init(&up->reader_queue);
INIT_HLIST_NODE(&up->tunnel_list);
up->forward_threshold = sk->sk_rcvbuf >> 2;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
+
+ up->udp_prod_queue = kcalloc(nr_node_ids, sizeof(*up->udp_prod_queue),
+ GFP_KERNEL);
+ if (!up->udp_prod_queue)
+ return -ENOMEM;
+ for (int i = 0; i < nr_node_ids; i++)
+ init_llist_head(&up->udp_prod_queue[i].ll_root);
+ return 0;
+}
+
+static inline void udp_drops_inc(struct sock *sk)
+{
+ numa_drop_add(&udp_sk(sk)->drop_counters, 1);
}
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
@@ -397,7 +411,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
return __skb_recv_udp(sk, flags, &off, err);
}
-int udp_v4_early_demux(struct sk_buff *skb);
+enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
@@ -627,7 +641,7 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
return segs;
drop:
- atomic_add(drop_count, &sk->sk_drops);
+ sk_drops_add(sk, drop_count);
SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
kfree_skb(skb);
return NULL;
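Since udp_lib_init_sock() can now fail with -ENOMEM (it allocates one producer queue per NUMA node), callers have to propagate its return value; a hypothetical protocol init handler might look like:

/* Hypothetical .init handler, assuming the int-returning helper above. */
static int example_udp_init_sock(struct sock *sk)
{
	int err = udp_lib_init_sock(sk);

	if (err)
		return err;	/* -ENOMEM if the per-node queues failed */

	/* ...protocol-specific setup would continue here... */
	return 0;
}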
diff --git a/include/net/xdp.h b/include/net/xdp.h
index f288c348a6c1..aa742f413c35 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -76,6 +76,11 @@ enum xdp_buff_flags {
XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under
* pressure
*/
+ /* frags have unreadable mem. This can't be true for real XDP packets,
+ * but drivers may use XDP helpers to construct Rx pkt state even when
+ * no XDP program is attached.
+ */
+ XDP_FLAGS_FRAGS_UNREADABLE = BIT(2),
};
struct xdp_buff {
@@ -85,8 +90,20 @@ struct xdp_buff {
void *data_hard_start;
struct xdp_rxq_info *rxq;
struct xdp_txq_info *txq;
- u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
- u32 flags; /* supported values defined in xdp_buff_flags */
+
+ union {
+ struct {
+ /* frame size to deduce data_hard_end/tailroom */
+ u32 frame_sz;
+ /* supported values defined in xdp_buff_flags */
+ u32 flags;
+ };
+
+#ifdef __LITTLE_ENDIAN
+ /* Used to micro-optimize xdp_init_buff(), don't use directly */
+ u64 frame_sz_flags_init;
+#endif
+ };
};
static __always_inline bool xdp_buff_has_frags(const struct xdp_buff *xdp)
@@ -104,15 +121,19 @@ static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}
-static __always_inline bool
-xdp_buff_is_frag_pfmemalloc(const struct xdp_buff *xdp)
+static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
- return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+ xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}
-static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
+static __always_inline void xdp_buff_set_frag_unreadable(struct xdp_buff *xdp)
{
- xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
+ xdp->flags |= XDP_FLAGS_FRAGS_UNREADABLE;
+}
+
+static __always_inline u32 xdp_buff_get_skb_flags(const struct xdp_buff *xdp)
+{
+ return xdp->flags;
}
static __always_inline void xdp_buff_clear_frag_pfmemalloc(struct xdp_buff *xdp)
@@ -123,9 +144,19 @@ static __always_inline void xdp_buff_clear_frag_pfmemalloc(struct xdp_buff *xdp)
static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
- xdp->frame_sz = frame_sz;
xdp->rxq = rxq;
+
+#ifdef __LITTLE_ENDIAN
+ /*
+ * Force the compilers to initialize ::flags and assign ::frame_sz with
+ * one write on 64-bit LE architectures as they're often unable to do
+ * it themselves.
+ */
+ xdp->frame_sz_flags_init = frame_sz;
+#else
+ xdp->frame_sz = frame_sz;
xdp->flags = 0;
+#endif
}
static __always_inline void
@@ -254,6 +285,8 @@ static inline bool xdp_buff_add_frag(struct xdp_buff *xdp, netmem_ref netmem,
if (unlikely(netmem_is_pfmemalloc(netmem)))
xdp_buff_set_frag_pfmemalloc(xdp);
+ if (unlikely(netmem_is_net_iov(netmem)))
+ xdp_buff_set_frag_unreadable(xdp);
return true;
}
@@ -277,10 +310,10 @@ static __always_inline bool xdp_frame_has_frags(const struct xdp_frame *frame)
return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}
-static __always_inline bool
-xdp_frame_is_frag_pfmemalloc(const struct xdp_frame *frame)
+static __always_inline u32
+xdp_frame_get_skb_flags(const struct xdp_frame *frame)
{
- return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+ return frame->flags;
}
#define XDP_BULK_QUEUE_SIZE 16
@@ -317,9 +350,9 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame)
}
static inline void
-xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
- unsigned int size, unsigned int truesize,
- bool pfmemalloc)
+xdp_update_skb_frags_info(struct sk_buff *skb, u8 nr_frags,
+ unsigned int size, unsigned int truesize,
+ u32 xdp_flags)
{
struct skb_shared_info *sinfo = skb_shinfo(skb);
@@ -333,7 +366,8 @@ xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
skb->len += size;
skb->data_len += size;
skb->truesize += truesize;
- skb->pfmemalloc |= pfmemalloc;
+ skb->pfmemalloc |= !!(xdp_flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
+ skb->unreadable |= !!(xdp_flags & XDP_FLAGS_FRAGS_UNREADABLE);
}
/* Avoids inlining WARN macro in fast-path */
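A minimal sketch of the driver-side pattern implied by the rename: instead of passing a pfmemalloc bool, the caller now forwards the buff's flags so both the pfmemalloc and unreadable-frag state reach the skb. The function and variable names here are hypothetical:

/* Hypothetical driver snippet building an skb from a multi-frag xdp_buff. */
static void example_finalize_skb(struct sk_buff *skb, struct xdp_buff *xdp,
				 u8 nr_frags, unsigned int size,
				 unsigned int truesize)
{
	if (xdp_buff_has_frags(xdp))
		xdp_update_skb_frags_info(skb, nr_frags, size, truesize,
					  xdp_buff_get_skb_flags(xdp));
}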
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 3f1b58d8b4bf..8bd0e1eb393b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -48,6 +48,7 @@
#define IB_MGMT_METHOD_REPORT 0x06
#define IB_MGMT_METHOD_REPORT_RESP 0x86
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
+#define IB_MGMT_METHOD_GET_TABLE 0x12
#define IB_MGMT_METHOD_RESP 0x80
#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index b46353fc53bf..95e8924ad563 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -189,6 +189,20 @@ struct sa_path_rec {
u32 flags;
};
+struct sa_service_rec {
+ __be64 id;
+ __u8 gid[16];
+ __be16 pkey;
+ __u8 reserved[2];
+ __be32 lease;
+ __u8 key[16];
+ __u8 name[64];
+ __u8 data_8[16];
+ __be16 data_16[8];
+ __be32 data_32[4];
+ __be64 data_64[2];
+};
+
static inline enum ib_gid_type
sa_conv_pathrec_to_gid_type(struct sa_path_rec *rec)
{
@@ -417,6 +431,17 @@ int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
unsigned int num_prs, void *context),
void *context, struct ib_sa_query **query);
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u32 port_num,
+ struct sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct sa_service_rec *resp,
+ unsigned int num_services,
+ void *context),
+ void *context, struct ib_sa_query **sa_query);
+
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
ib_sa_comp_mask comp_mask;
@@ -509,6 +534,18 @@ int ib_init_ah_attr_from_path(struct ib_device *device, u32 port_num,
void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute);
/**
+ * ib_sa_pack_service - Convert a service record from struct ib_sa_service_rec
+ * to IB MAD wire format.
+ */
+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute);
+
+/**
+ * ib_sa_unpack_service - Convert a service record from MAD format to struct
+ * ib_sa_service_rec.
+ */
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec);
+
+/**
* ib_sa_unpack_path - Convert a path record from MAD format to struct
* ib_sa_path_rec.
*/
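For illustration, a hypothetical completion callback for the new service-record query, mirroring the callback signature declared above (client/device setup, the actual ib_sa_service_rec_get() call, and error handling are elided):

/* Hypothetical completion callback for ib_sa_service_rec_get(). */
static void example_service_resolved(int status, struct sa_service_rec *resp,
				     unsigned int num_services, void *context)
{
	if (status)
		pr_debug("service query failed: %d\n", status);
	else
		pr_debug("resolved %u service record(s)\n", num_services);
}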
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index d1593ad47e28..9bd930a83e6e 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -33,7 +33,11 @@ enum rdma_cm_event_type {
RDMA_CM_EVENT_MULTICAST_JOIN,
RDMA_CM_EVENT_MULTICAST_ERROR,
RDMA_CM_EVENT_ADDR_CHANGE,
- RDMA_CM_EVENT_TIMEWAIT_EXIT
+ RDMA_CM_EVENT_TIMEWAIT_EXIT,
+ RDMA_CM_EVENT_ADDRINFO_RESOLVED,
+ RDMA_CM_EVENT_ADDRINFO_ERROR,
+ RDMA_CM_EVENT_USER,
+ RDMA_CM_EVENT_INTERNAL,
};
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event);
@@ -63,6 +67,9 @@ struct rdma_route {
* 2 - Both primary and alternate path are available
*/
int num_pri_alt_paths;
+
+ unsigned int num_service_recs;
+ struct sa_service_rec *service_recs;
};
struct rdma_conn_param {
@@ -93,6 +100,7 @@ struct rdma_cm_event {
union {
struct rdma_conn_param conn;
struct rdma_ud_param ud;
+ u64 arg;
} param;
struct rdma_ucm_ece ece;
};
@@ -198,6 +206,17 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms);
/**
+ * rdma_resolve_ib_service - Resolve the IB service record of the
+ * service with the given service ID or name.
+ *
+ * This function is optional in the rdma cm flow. It is called on the client
+ * side of a connection, before calling rdma_resolve_route. The resolution
+ * can be done once per rdma_cm_id.
+ */
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+ struct rdma_ucm_ib_service *ibs);
+
+/**
* rdma_create_qp - Allocate a QP and associate it with the specified RDMA
* identifier.
*
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ba460b6c0374..a0635b128d7a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -203,6 +203,14 @@ static inline bool dev_is_expander(enum sas_device_type type)
type == SAS_FANOUT_EXPANDER_DEVICE;
}
+static inline bool dev_parent_is_expander(struct domain_device *dev)
+{
+ if (!dev->parent)
+ return false;
+
+ return dev_is_expander(dev->parent->dev_type);
+}
+
static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
{
INIT_WORK(&sw->work, fn);
@@ -685,7 +693,7 @@ extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
extern int sas_target_alloc(struct scsi_target *);
int sas_sdev_configure(struct scsi_device *dev, struct queue_limits *lim);
extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
-extern int sas_bios_param(struct scsi_device *, struct block_device *,
+extern int sas_bios_param(struct scsi_device *, struct gendisk *,
sector_t capacity, int *hsc);
int sas_execute_internal_abort_single(struct domain_device *device,
u16 tag, unsigned int qid,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index c53812b9026f..f5a243261236 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -318,7 +318,7 @@ struct scsi_host_template {
*
* Status: OPTIONAL
*/
- int (* bios_param)(struct scsi_device *, struct block_device *,
+ int (* bios_param)(struct scsi_device *, struct gendisk *,
sector_t, int []);
/*
diff --git a/include/scsi/scsicam.h b/include/scsi/scsicam.h
index 08edd603e521..1131f51ed2c8 100644
--- a/include/scsi/scsicam.h
+++ b/include/scsi/scsicam.h
@@ -13,7 +13,8 @@
#ifndef SCSICAM_H
#define SCSICAM_H
-int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip);
-bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]);
-unsigned char *scsi_bios_ptable(struct block_device *bdev);
+struct gendisk;
+int scsicam_bios_param(struct gendisk *disk, sector_t capacity, int *ip);
+bool scsi_partsize(struct gendisk *disk, sector_t capacity, int geom[3]);
+unsigned char *scsi_bios_ptable(struct gendisk *disk);
#endif /* def SCSICAM_H */
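The geometry helpers now take the gendisk directly, so a host template's bios_param implementation keeps the same shape with the new parameter type; a hypothetical sketch:

/* Hypothetical ->bios_param implementation using the gendisk-based helper. */
static int example_bios_param(struct scsi_device *sdev, struct gendisk *disk,
			      sector_t capacity, int geom[])
{
	if (scsicam_bios_param(disk, capacity, geom) < 0) {
		/* Fall back to a simple 64/32 translation. */
		geom[0] = 64;			/* heads */
		geom[1] = 32;			/* sectors per track */
		sector_div(capacity, 64 * 32);
		geom[2] = capacity;		/* cylinders */
	}
	return 0;
}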
diff --git a/include/soc/at91/sama7-sfrbu.h b/include/soc/at91/sama7-sfrbu.h
index 76b740810d34..8cee48d1ae2c 100644
--- a/include/soc/at91/sama7-sfrbu.h
+++ b/include/soc/at91/sama7-sfrbu.h
@@ -18,13 +18,6 @@
#define AT91_SFRBU_PSWBU_SOFTSWITCH (1 << 1) /* Power switch BU source selection */
#define AT91_SFRBU_PSWBU_CTRL (1 << 0) /* Power switch BU control */
-#define AT91_SFRBU_25LDOCR (0x0C) /* SFRBU 2.5V LDO Control Register */
-#define AT91_SFRBU_25LDOCR_LDOANAKEY (0x3B6E18 << 8) /* Specific value mandatory to allow writing of other register bits. */
-#define AT91_SFRBU_25LDOCR_STATE (1 << 3) /* LDOANA Switch On/Off Control */
-#define AT91_SFRBU_25LDOCR_LP (1 << 2) /* LDOANA Low-Power Mode Control */
-#define AT91_SFRBU_PD_VALUE_MSK (0x3)
-#define AT91_SFRBU_25LDOCR_PD_VALUE(v) ((v) & AT91_SFRBU_PD_VALUE_MSK) /* LDOANA Pull-down value */
-
#define AT91_FRBU_DDRPWR (0x10) /* SFRBU DDR Power Control Register */
#define AT91_FRBU_DDRPWR_STATE (1 << 0) /* DDR Power Mode State */
diff --git a/include/soc/rockchip/rk3588_grf.h b/include/soc/rockchip/rk3588_grf.h
index 630b35a55064..02a7b2432d99 100644
--- a/include/soc/rockchip/rk3588_grf.h
+++ b/include/soc/rockchip/rk3588_grf.h
@@ -12,7 +12,11 @@
#define RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3 GENMASK(13, 12)
#define RK3588_PMUGRF_OS_REG3_SYSREG_VERSION GENMASK(31, 28)
-#define RK3588_PMUGRF_OS_REG4 0x210
-#define RK3588_PMUGRF_OS_REG5 0x214
+#define RK3588_PMUGRF_OS_REG4 0x210
+#define RK3588_PMUGRF_OS_REG5 0x214
+#define RK3588_PMUGRF_OS_REG6 0x218
+#define RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE GENMASK(2, 1)
+/* Whether the LPDDR5 is in 2:1 (= 0) or 4:1 (= 1) CKR a.k.a. DQS mode */
+#define RK3588_PMUGRF_OS_REG6_LP5_CKR BIT(0)
#endif /* __SOC_RK3588_GRF_H */
diff --git a/include/soc/rockchip/rockchip_grf.h b/include/soc/rockchip/rockchip_grf.h
index e46fd72aea8d..41c7bb26fd53 100644
--- a/include/soc/rockchip/rockchip_grf.h
+++ b/include/soc/rockchip/rockchip_grf.h
@@ -13,6 +13,7 @@ enum {
ROCKCHIP_DDRTYPE_LPDDR3 = 6,
ROCKCHIP_DDRTYPE_LPDDR4 = 7,
ROCKCHIP_DDRTYPE_LPDDR4X = 8,
+ ROCKCHIP_DDRTYPE_LPDDR5 = 9,
};
#endif /* __SOC_ROCKCHIP_GRF_H */
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index b55c9eeb2b54..9e3d801e45ec 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -161,7 +161,7 @@ struct snd_compr_ops {
struct snd_compr_metadata *metadata);
int (*trigger)(struct snd_compr_stream *stream, int cmd);
int (*pointer)(struct snd_compr_stream *stream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int (*copy)(struct snd_compr_stream *stream, char __user *buf,
size_t count);
int (*mmap)(struct snd_compr_stream *stream,
diff --git a/include/sound/cs-amp-lib.h b/include/sound/cs-amp-lib.h
index 5459c221badf..43a87a39110c 100644
--- a/include/sound/cs-amp-lib.h
+++ b/include/sound/cs-amp-lib.h
@@ -49,6 +49,7 @@ int cs_amp_write_cal_coeffs(struct cs_dsp *dsp,
const struct cirrus_amp_cal_data *data);
int cs_amp_get_efi_calibration_data(struct device *dev, u64 target_uid, int amp_index,
struct cirrus_amp_cal_data *out_data);
+int cs_amp_get_vendor_spkid(struct device *dev);
struct cs_amp_test_hooks {
efi_status_t (*get_efi_variable)(efi_char16_t *name,
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 7c8bbe8ad1e2..ab044ce2aa8b 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -85,7 +85,9 @@
#define CS35L56_DSP1_XMEM_UNPACKED24_0 0x2800000
#define CS35L56_DSP1_FW_VER 0x2800010
#define CS35L56_DSP1_HALO_STATE 0x28021E0
+#define CS35L56_B2_DSP1_HALO_STATE 0x2803D20
#define CS35L56_DSP1_PM_CUR_STATE 0x2804308
+#define CS35L56_B2_DSP1_PM_CUR_STATE 0x2804678
#define CS35L56_DSP1_XMEM_UNPACKED24_8191 0x2807FFC
#define CS35L56_DSP1_CORE_BASE 0x2B80000
#define CS35L56_DSP1_SCRATCH1 0x2B805C0
@@ -337,9 +339,6 @@ extern const struct regmap_config cs35l56_regmap_sdw;
extern const struct regmap_config cs35l63_regmap_i2c;
extern const struct regmap_config cs35l63_regmap_sdw;
-extern const struct cs35l56_fw_reg cs35l56_fw_reg;
-extern const struct cs35l56_fw_reg cs35l63_fw_reg;
-
extern const struct cirrus_amp_cal_controls cs35l56_calibration_controls;
extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 1ef13bcdc43f..9472f0a966a2 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -69,6 +69,10 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
* @peripheral_config: peripheral configuration for programming peripheral
* for dmaengine transfer
* @peripheral_size: peripheral configuration buffer size
+ * @port_window_size: The length, in words, of the register area through which
+ * the data is accessed on the device side. It is only used for devices which
+ * use an area instead of a single register to send/receive the data. Typically
+ * the DMA loops over this area in order to transfer the data.
*/
struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
@@ -80,6 +84,7 @@ struct snd_dmaengine_dai_dma_data {
unsigned int flags;
void *peripheral_config;
size_t peripheral_size;
+ u32 port_window_size;
};
void snd_dmaengine_pcm_set_config_from_dai_data(
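A short sketch of the new field in use; the DAI driver, the window length of 16 words, and the bus width chosen here are hypothetical:

/* Hypothetical DAI setup: the device FIFO is a 16-word register window. */
static void example_init_dma_data(struct snd_dmaengine_dai_dma_data *dma)
{
	dma->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma->port_window_size = 16;	/* words, per the doc comment above */
}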
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 38db50b280eb..4f94565c9d15 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1842,8 +1842,7 @@ unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg,
void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data);
int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
-static inline void snd_emu1010_fpga_lock(struct snd_emu10k1 *emu) { mutex_lock(&emu->emu1010.lock); };
-static inline void snd_emu1010_fpga_unlock(struct snd_emu10k1 *emu) { mutex_unlock(&emu->emu1010.lock); };
+DEFINE_GUARD(snd_emu1010_fpga_lock, struct snd_emu10k1 *, mutex_lock(&(_T)->emu1010.lock), mutex_unlock(&(_T)->emu1010.lock))
void snd_emu1010_fpga_write_lock(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value);
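With the DEFINE_GUARD() above, lock scopes can use the cleanup.h helpers; an illustrative, hypothetical user:

/* Hypothetical helper showing scope-based FPGA locking via cleanup.h. */
static void example_fpga_touch(struct snd_emu10k1 *emu, u32 reg, u32 value)
{
	guard(snd_emu1010_fpga_lock)(emu);	/* unlocks on scope exit */

	snd_emu1010_fpga_write(emu, reg, value);
}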
diff --git a/include/sound/gus.h b/include/sound/gus.h
index 1c8fb6c93e50..321ae93625eb 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -515,7 +515,6 @@ struct _SND_IW_LFO_PROGRAM {
/* gus_mem.c */
-void snd_gf1_mem_lock(struct snd_gf1_mem * alloc, int xup);
int snd_gf1_mem_xfree(struct snd_gf1_mem * alloc, struct snd_gf1_mem_block * block);
struct snd_gf1_mem_block *snd_gf1_mem_alloc(struct snd_gf1_mem * alloc, int owner,
char *name, int size, int w_16,
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index ddc9c392f93f..5d9f0ef228af 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -360,8 +360,8 @@ int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t nid, int recursive);
unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
- u8 *dev_list, int max_devices);
+unsigned int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
+ u8 *dev_list, unsigned int max_devices);
int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id);
@@ -503,6 +503,36 @@ static inline bool hda_codec_need_resume(struct hda_codec *codec)
return !codec->relaxed_resume && codec->jacktbl.used;
}
+/*
+ * PM with auto-cleanup: call like CLASS(snd_hda_power, pm)(codec)
+ * If error handling is needed, check pm.err.
+ */
+struct __hda_power_obj {
+ struct hda_codec *codec;
+ int err;
+};
+
+static inline struct __hda_power_obj __snd_hda_power_up(struct hda_codec *codec)
+{
+ struct __hda_power_obj T = { .codec = codec };
+ T.err = snd_hda_power_up(codec);
+ return T;
+}
+
+static inline struct __hda_power_obj __snd_hda_power_up_pm(struct hda_codec *codec)
+{
+ struct __hda_power_obj T = { .codec = codec };
+ T.err = snd_hda_power_up_pm(codec);
+ return T;
+}
+
+DEFINE_CLASS(snd_hda_power, struct __hda_power_obj,
+ snd_hda_power_down((_T).codec), __snd_hda_power_up(codec),
+ struct hda_codec *codec)
+DEFINE_CLASS(snd_hda_power_pm, struct __hda_power_obj,
+ snd_hda_power_down_pm((_T).codec), __snd_hda_power_up_pm(codec),
+ struct hda_codec *codec)
+
#ifdef CONFIG_SND_HDA_PATCH_LOADER
/*
* patch firmware
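The comment above already hints at the usage; spelled out, a hypothetical caller of the auto-cleanup power class would look like:

/* Hypothetical: keep the codec powered up for the duration of the scope. */
static int example_read_state(struct hda_codec *codec)
{
	CLASS(snd_hda_power, pm)(codec);	/* powered down on scope exit */

	if (pm.err < 0)
		return pm.err;

	/* ...verbs can be issued here while the codec is powered... */
	return 0;
}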
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index d38234f8fe44..4e0c1d8af09f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -651,6 +651,7 @@ int snd_hdac_stream_set_lpib(struct hdac_stream *azx_dev, u32 value);
#define snd_hdac_dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex)
#define snd_hdac_dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex)
#define snd_hdac_stream_is_locked(dev) ((dev)->locked)
+DEFINE_GUARD(snd_hdac_dsp_lock, struct hdac_stream *, snd_hdac_dsp_lock(_T), snd_hdac_dsp_unlock(_T))
/* DSP loader helpers */
int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
unsigned int byte_size, struct snd_dma_buffer *bufp);
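Similarly to the FPGA guard above, the new snd_hdac_dsp_lock guard allows scope-based DSP locking; an illustrative sketch around the prepare helper declared just above:

/* Hypothetical: hold the DSP lock across a firmware load step. */
static int example_dsp_load(struct hdac_stream *azx_dev, unsigned int fmt,
			    unsigned int size, struct snd_dma_buffer *buf)
{
	guard(snd_hdac_dsp_lock)(azx_dev);	/* unlocked on scope exit */

	return snd_hdac_dsp_prepare(azx_dev, fmt, size, buf);
}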
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 2caa807c6249..d78cda866888 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -47,7 +47,7 @@ struct snd_compress_ops {
struct snd_compr_stream *stream, int cmd);
int (*pointer)(struct snd_soc_component *component,
struct snd_compr_stream *stream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int (*copy)(struct snd_soc_component *component,
struct snd_compr_stream *stream, char __user *buf,
size_t count);
@@ -261,89 +261,18 @@ struct snd_soc_component {
list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list)
/**
- * snd_soc_dapm_to_component() - Casts a DAPM context to the component it is
- * embedded in
- * @dapm: The DAPM context to cast to the component
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a component (e.g. in a component driver). Otherwise the behavior is
- * undefined.
- */
-static inline struct snd_soc_component *snd_soc_dapm_to_component(
- struct snd_soc_dapm_context *dapm)
-{
- return container_of(dapm, struct snd_soc_component, dapm);
-}
-
-/**
- * snd_soc_component_get_dapm() - Returns the DAPM context associated with a
+ * snd_soc_component_to_dapm() - Returns the DAPM context associated with a
* component
* @component: The component for which to get the DAPM context
*/
-static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
+static inline struct snd_soc_dapm_context *snd_soc_component_to_dapm(
struct snd_soc_component *component)
{
return &component->dapm;
}
-/**
- * snd_soc_component_init_bias_level() - Initialize COMPONENT DAPM bias level
- * @component: The COMPONENT for which to initialize the DAPM bias level
- * @level: The DAPM level to initialize to
- *
- * Initializes the COMPONENT DAPM bias level. See snd_soc_dapm_init_bias_level()
- */
-static inline void
-snd_soc_component_init_bias_level(struct snd_soc_component *component,
- enum snd_soc_bias_level level)
-{
- snd_soc_dapm_init_bias_level(
- snd_soc_component_get_dapm(component), level);
-}
-
-/**
- * snd_soc_component_get_bias_level() - Get current COMPONENT DAPM bias level
- * @component: The COMPONENT for which to get the DAPM bias level
- *
- * Returns: The current DAPM bias level of the COMPONENT.
- */
-static inline enum snd_soc_bias_level
-snd_soc_component_get_bias_level(struct snd_soc_component *component)
-{
- return snd_soc_dapm_get_bias_level(
- snd_soc_component_get_dapm(component));
-}
-
-/**
- * snd_soc_component_force_bias_level() - Set the COMPONENT DAPM bias level
- * @component: The COMPONENT for which to set the level
- * @level: The level to set to
- *
- * Forces the COMPONENT bias level to a specific state. See
- * snd_soc_dapm_force_bias_level().
- */
-static inline int
-snd_soc_component_force_bias_level(struct snd_soc_component *component,
- enum snd_soc_bias_level level)
-{
- return snd_soc_dapm_force_bias_level(
- snd_soc_component_get_dapm(component),
- level);
-}
-
-/**
- * snd_soc_dapm_kcontrol_component() - Returns the component associated to a
- * kcontrol
- * @kcontrol: The kcontrol
- *
- * This function must only be used on DAPM contexts that are known to be part of
- * a COMPONENT (e.g. in a COMPONENT driver). Otherwise the behavior is undefined
- */
-static inline struct snd_soc_component *snd_soc_dapm_kcontrol_component(
- struct snd_kcontrol *kcontrol)
-{
- return snd_soc_dapm_to_component(snd_soc_dapm_kcontrol_dapm(kcontrol));
-}
+// FIXME
+#define snd_soc_component_get_dapm snd_soc_component_to_dapm
/**
* snd_soc_component_cache_sync() - Sync the register cache with the hardware
@@ -498,7 +427,7 @@ int snd_soc_component_compr_get_codec_caps(struct snd_compr_stream *cstream,
struct snd_compr_codec_caps *codec);
int snd_soc_component_compr_ack(struct snd_compr_stream *cstream, size_t bytes);
int snd_soc_component_compr_pointer(struct snd_compr_stream *cstream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int snd_soc_component_compr_copy(struct snd_compr_stream *cstream,
char __user *buf, size_t count);
int snd_soc_component_compr_set_metadata(struct snd_compr_stream *cstream,
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 166c29557e9d..224396927aef 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -256,7 +256,7 @@ int snd_soc_dai_compr_ack(struct snd_soc_dai *dai,
size_t bytes);
int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
- struct snd_compr_tstamp *tstamp);
+ struct snd_compr_tstamp64 *tstamp);
int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata);
@@ -383,8 +383,9 @@ struct snd_soc_cdai_ops {
struct snd_compr_metadata *, struct snd_soc_dai *);
int (*trigger)(struct snd_compr_stream *, int,
struct snd_soc_dai *);
- int (*pointer)(struct snd_compr_stream *,
- struct snd_compr_tstamp *, struct snd_soc_dai *);
+ int (*pointer)(struct snd_compr_stream *stream,
+ struct snd_compr_tstamp64 *tstamp,
+ struct snd_soc_dai *dai);
int (*ack)(struct snd_compr_stream *, size_t,
struct snd_soc_dai *);
};
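
The compressed-audio pointer callbacks now take struct snd_compr_tstamp64. A minimal ops sketch with the new signature; foo_dai_compr_pointer is a hypothetical callback, and the timestamp fields are deliberately left untouched since only the prototype change matters here:

    #include <sound/compress_driver.h>
    #include <sound/soc-dai.h>

    static int foo_dai_compr_pointer(struct snd_compr_stream *cstream,
    				 struct snd_compr_tstamp64 *tstamp,
    				 struct snd_soc_dai *dai)
    {
    	/* a real driver fills *tstamp from its hardware read pointer */
    	return 0;
    }

    static const struct snd_soc_cdai_ops foo_cdai_ops = {
    	.pointer = foo_dai_compr_pointer,
    };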
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 0b5c7e6a90c8..75941324886b 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -583,11 +583,9 @@ struct snd_soc_dapm_update {
struct snd_soc_dapm_context {
enum snd_soc_bias_level bias_level;
- /* bit field */
- unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
- unsigned int suspend_bias_off:1; /* Use BIAS_OFF in suspend if the DAPM is idle */
+ bool idle_bias; /* Use BIAS_OFF instead of STANDBY when false */
- struct device *dev; /* from parent - for debug */
+ struct device *dev; /* from parent - for debug */ /* REMOVE ME */
struct snd_soc_component *component; /* parent component */
struct snd_soc_card *card; /* parent card */
@@ -660,6 +658,12 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
+struct device *snd_soc_dapm_to_dev(struct snd_soc_dapm_context *dapm);
+struct snd_soc_card *snd_soc_dapm_to_card(struct snd_soc_dapm_context *dapm);
+struct snd_soc_component *snd_soc_dapm_to_component(struct snd_soc_dapm_context *dapm);
+
+bool snd_soc_dapm_get_idle_bias(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_set_idle_bias(struct snd_soc_dapm_context *dapm, bool on);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
@@ -699,7 +703,6 @@ int snd_soc_dapm_sync_unlocked(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin);
int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm, const char *pin);
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm, const char *pin);
-unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
void snd_soc_dapm_mark_endpoints_dirty(struct snd_soc_card *card);
/*
@@ -718,10 +721,23 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
bool (*custom_stop_condition)(struct snd_soc_dapm_widget *, enum snd_soc_dapm_direction));
void snd_soc_dapm_dai_free_widgets(struct snd_soc_dapm_widget_list **list);
-struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_dapm(struct snd_kcontrol *kcontrol);
-struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_widget(struct snd_kcontrol *kcontrol);
+struct snd_soc_dapm_context *snd_soc_dapm_kcontrol_to_dapm(struct snd_kcontrol *kcontrol);
+struct snd_soc_dapm_widget *snd_soc_dapm_kcontrol_to_widget(struct snd_kcontrol *kcontrol);
+struct snd_soc_component *snd_soc_dapm_kcontrol_to_component(struct snd_kcontrol *kcontrol);
+unsigned int snd_soc_dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
+enum snd_soc_bias_level snd_soc_dapm_get_bias_level(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_init_bias_level(struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level);
+
+// REMOVE ME !!
+#define snd_soc_component_force_bias_level(c, l) snd_soc_dapm_force_bias_level(&(c)->dapm, l)
+#define snd_soc_component_get_bias_level(c) snd_soc_dapm_get_bias_level(&(c)->dapm)
+#define snd_soc_component_init_bias_level(c, l) snd_soc_dapm_init_bias_level(&(c)->dapm, l)
+#define snd_soc_dapm_kcontrol_widget snd_soc_dapm_kcontrol_to_widget
+#define snd_soc_dapm_kcontrol_dapm snd_soc_dapm_kcontrol_to_dapm
+#define dapm_kcontrol_get_value snd_soc_dapm_kcontrol_get_value
+#define snd_soc_dapm_kcontrol_component snd_soc_dapm_kcontrol_to_component
#define for_each_dapm_widgets(list, i, widget) \
for ((i) = 0; \
@@ -729,37 +745,6 @@ int snd_soc_dapm_force_bias_level(struct snd_soc_dapm_context *dapm, enum snd_so
(i)++)
/**
- * snd_soc_dapm_init_bias_level() - Initialize DAPM bias level
- * @dapm: The DAPM context to initialize
- * @level: The DAPM level to initialize to
- *
- * This function only sets the driver internal state of the DAPM level and will
- * not modify the state of the device. Hence it should not be used during normal
- * operation, but only to synchronize the internal state to the device state.
- * E.g. during driver probe to set the DAPM level to the one corresponding with
- * the power-on reset state of the device.
- *
- * To change the DAPM state of the device use snd_soc_dapm_set_bias_level().
- */
-static inline void snd_soc_dapm_init_bias_level(
- struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level)
-{
- dapm->bias_level = level;
-}
-
-/**
- * snd_soc_dapm_get_bias_level() - Get current DAPM bias level
- * @dapm: The context for which to get the bias level
- *
- * Returns: The current bias level of the passed DAPM context.
- */
-static inline enum snd_soc_bias_level snd_soc_dapm_get_bias_level(
- struct snd_soc_dapm_context *dapm)
-{
- return dapm->bias_level;
-}
-
-/**
* snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
* specified direction of a widget
* @w: The widget
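
With the idle_bias_off bit field collapsed into a single bool behind getter/setter functions, drivers stop poking the field directly. A minimal probe sketch, assuming a hypothetical foo_component driver:

    #include <sound/soc.h>
    #include <sound/soc-dapm.h>

    static int foo_component_probe(struct snd_soc_component *component)
    {
    	struct snd_soc_dapm_context *dapm =
    		snd_soc_component_to_dapm(component);

    	/* stay in STANDBY (rather than BIAS_OFF) while idle */
    	snd_soc_dapm_set_idle_bias(dapm, true);

    	/* record the power-on reset state without touching the device */
    	snd_soc_dapm_init_bias_level(dapm, SND_SOC_BIAS_OFF);

    	return 0;
    }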
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1fffef311c41..ddc508ff7b9b 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1120,6 +1120,11 @@ static inline int snd_soc_card_is_instantiated(struct snd_soc_card *card)
return card && card->instantiated;
}
+static inline struct snd_soc_dapm_context *snd_soc_card_to_dapm(struct snd_soc_card *card)
+{
+ return &card->dapm;
+}
+
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
struct device *dev;
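
snd_soc_card_to_dapm() gives machine drivers the same accessor pattern for the card-level DAPM context. A sketch, assuming a hypothetical machine driver and a "Headphone Jack" pin:

    #include <sound/soc.h>
    #include <sound/soc-dapm.h>

    static void foo_card_setup(struct snd_soc_card *card)
    {
    	struct snd_soc_dapm_context *dapm = snd_soc_card_to_dapm(card);

    	snd_soc_dapm_ignore_suspend(dapm, "Headphone Jack");
    }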
diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h
index 6049a5d0cfcd..3c5e9b2af7f1 100644
--- a/include/sound/soc_sdw_utils.h
+++ b/include/sound/soc_sdw_utils.h
@@ -248,5 +248,13 @@ int asoc_sdw_cs42l43_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_so
int asoc_sdw_cs42l43_dmic_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
int asoc_sdw_cs_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
int asoc_sdw_maxim_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+/* TI */
+int asoc_sdw_ti_amp_init(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_links,
+ struct asoc_sdw_codec_info *info,
+ bool playback);
+int asoc_sdw_ti_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai);
+int asoc_sdw_ti_amp_initial_settings(struct snd_soc_card *card,
+ const char *name_prefix);
#endif
diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h
index e85c7afd85a4..15fac532688e 100644
--- a/include/sound/sof/ipc4/header.h
+++ b/include/sound/sof/ipc4/header.h
@@ -326,10 +326,14 @@ struct sof_ipc4_base_module_cfg {
#define SOF_IPC4_MOD_INSTANCE_SHIFT 16
#define SOF_IPC4_MOD_INSTANCE_MASK GENMASK(23, 16)
#define SOF_IPC4_MOD_INSTANCE(x) ((x) << SOF_IPC4_MOD_INSTANCE_SHIFT)
+#define SOF_IPC4_MOD_INSTANCE_GET(x) (((x) & SOF_IPC4_MOD_INSTANCE_MASK) \
+ >> SOF_IPC4_MOD_INSTANCE_SHIFT)
#define SOF_IPC4_MOD_ID_SHIFT 0
#define SOF_IPC4_MOD_ID_MASK GENMASK(15, 0)
#define SOF_IPC4_MOD_ID(x) ((x) << SOF_IPC4_MOD_ID_SHIFT)
+#define SOF_IPC4_MOD_ID_GET(x) (((x) & SOF_IPC4_MOD_ID_MASK) \
+ >> SOF_IPC4_MOD_ID_SHIFT)
/* init module ipc msg */
#define SOF_IPC4_MOD_EXT_PARAM_SIZE_SHIFT 0
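
The new _GET macros make the primary-header packing reversible. A quick round-trip sketch (foo_ipc4_demo is hypothetical):

    #include <linux/bug.h>
    #include <linux/types.h>
    #include <sound/sof/ipc4/header.h>

    static void foo_ipc4_demo(void)
    {
    	u32 primary = SOF_IPC4_MOD_ID(0x42) | SOF_IPC4_MOD_INSTANCE(3);

    	/* mask-and-shift recovers exactly what was packed in */
    	WARN_ON(SOF_IPC4_MOD_ID_GET(primary) != 0x42);
    	WARN_ON(SOF_IPC4_MOD_INSTANCE_GET(primary) != 3);
    }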
diff --git a/include/sound/soundfont.h b/include/sound/soundfont.h
index 8a40cc15f66d..48f8cf6de3ac 100644
--- a/include/sound/soundfont.h
+++ b/include/sound/soundfont.h
@@ -114,5 +114,23 @@ int snd_sf_calc_parm_decay(int msec);
extern int snd_sf_vol_table[128];
int snd_sf_linear_to_log(unsigned int amount, int offset, int ratio);
+/* lock access to sflist */
+static inline void snd_soundfont_lock_preset(struct snd_sf_list *sflist)
+{
+ mutex_lock(&sflist->presets_mutex);
+ guard(spinlock_irqsave)(&sflist->lock);
+ sflist->presets_locked = 1;
+}
+
+/* remove lock */
+static inline void snd_soundfont_unlock_preset(struct snd_sf_list *sflist)
+{
+ guard(spinlock_irqsave)(&sflist->lock);
+ sflist->presets_locked = 0;
+ mutex_unlock(&sflist->presets_mutex);
+}
+
+DEFINE_GUARD(snd_soundfont_lock_preset, struct snd_sf_list *,
+ snd_soundfont_lock_preset(_T), snd_soundfont_unlock_preset(_T))
#endif /* __SOUND_SOUNDFONT_H */
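
DEFINE_GUARD() makes the preset lock usable with scope-based cleanup. A minimal caller sketch (foo_touch_presets is hypothetical):

    #include <linux/cleanup.h>
    #include <sound/soundfont.h>

    static void foo_touch_presets(struct snd_sf_list *sflist)
    {
    	guard(snd_soundfont_lock_preset)(sflist);

    	/* ... operate on the preset list ... */

    	/* snd_soundfont_unlock_preset() runs automatically at scope
    	 * exit, including on any early return */
    }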
diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
index c3a9efa73d5d..dd6ee45ad096 100644
--- a/include/sound/tas2781-dsp.h
+++ b/include/sound/tas2781-dsp.h
@@ -34,6 +34,7 @@
#define PPC3_VERSION_TAS2781_BASIC_MIN 0x14600
#define PPC3_VERSION_TAS2781_ALPHA_MIN 0x4a00
#define PPC3_VERSION_TAS2781_BETA_MIN 0x19400
+#define PPC3_VERSION_TAS5825_BASE 0x114200
#define TASDEVICE_DEVICE_SUM 8
#define TASDEVICE_CONFIG_SUM 64
@@ -53,6 +54,8 @@ enum tasdevice_dsp_dev_idx {
TASDEVICE_DSP_TAS_2781_DUAL_MONO,
TASDEVICE_DSP_TAS_2781_21,
TASDEVICE_DSP_TAS_2781_QUAD,
+ TASDEVICE_DSP_TAS_5825_MONO,
+ TASDEVICE_DSP_TAS_5825_DUAL,
TASDEVICE_DSP_TAS_MAX_DEVICE
};
@@ -198,6 +201,14 @@ struct tasdevice_rca {
int ncfgs;
struct tasdevice_config_info **cfg_info;
int profile_cfg_id;
+ /*
+ * Since version 0x105, the keyword 'init' has been part of the
+ * profile; it marks the profile used for chip initialization, in
+ * particular to store settings shared with the other,
+ * non-initialization profiles. If init_profile_id < 0, there is
+ * no init profile inside the RCA firmware.
+ */
+ int init_profile_id;
};
void tasdevice_select_cfg_blk(void *context, int conf_no,
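
Consumers of the new field are expected to treat a negative init_profile_id as "no init profile". A hypothetical fragment:

    #include <sound/tas2781-dsp.h>

    static void foo_apply_init_profile(struct tasdevice_rca *rca)
    {
    	if (rca->init_profile_id < 0)
    		return;	/* no init profile inside the RCA firmware */

    	/* ... program the common settings from
    	 * rca->cfg_info[rca->init_profile_id] ... */
    }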
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 3875e92f1ec5..ddd997ac3216 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -49,10 +49,12 @@
#define TASDEVICE_REG(book, page, reg) (((book * 256 * 128) + \
(page * 128)) + reg)
-/* Software Reset */
+/* Software Reset, compatible with the new device (TAS5825). */
#define TASDEVICE_REG_SWRESET TASDEVICE_REG(0x0, 0x0, 0x01)
#define TASDEVICE_REG_SWRESET_RESET BIT(0)
+#define TAS5825_REG_SWRESET_RESET (BIT(0) | BIT(4))
+
/* Checksum */
#define TASDEVICE_CHECKSUM_REG TASDEVICE_REG(0x0, 0x0, 0x7e)
@@ -110,8 +112,17 @@
#define TAS2781_RUNTIME_RE_REG TASDEVICE_REG(0x64, 0x63, 0x44)
enum audio_device {
+ TAS2020,
+ TAS2118,
+ TAS2120,
+ TAS2320,
TAS2563,
+ TAS2570,
+ TAS2572,
TAS2781,
+ TAS5825,
+ TAS5827,
+ TAS_OTHERS,
};
enum dspbin_type {
@@ -194,6 +205,7 @@ struct tasdevice_priv {
unsigned char coef_binaryname[64];
unsigned char rca_binaryname[64];
unsigned char dev_name[32];
+ const unsigned char (*dvc_tlv_table)[4];
const char *name_prefix;
unsigned char ndev;
unsigned int dspbin_typ;
diff --git a/include/sound/tas2x20-tlv.h b/include/sound/tas2x20-tlv.h
new file mode 100644
index 000000000000..6e6bcec4a0a1
--- /dev/null
+++ b/include/sound/tas2x20-tlv.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS2x20/TAS2118 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS2x20/TAS2118 HDA driver supports one, two, or even multiple
+// TAS2x20/TAS2118 chips.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+//
+
+#ifndef __TAS2X20_TLV_H__
+#define __TAS2X20_TLV_H__
+
+#define TAS2X20_DVC_LEVEL TASDEVICE_REG(0x0, 0x2, 0x0c)
+#define TAS2X20_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x07)
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2x20_dvc_tlv, 1650, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2x20_amp_tlv, 2100, 50, 0);
+
+/* pow(10, dB / 20) * pow(2, 22) */
+static const __maybe_unused unsigned char tas2x20_dvc_table[][4] = {
+ { 0X00, 0X00, 0X0D, 0X00 }, /* -110.0db */
+ { 0X00, 0X00, 0X0E, 0X00 }, /* -109.5db */
+ { 0X00, 0X00, 0X0E, 0X00 }, /* -109.0db */
+ { 0X00, 0X00, 0X0F, 0X00 }, /* -108.5db */
+ { 0X00, 0X00, 0X10, 0X00 }, /* -108.0db */
+ { 0X00, 0X00, 0X11, 0X00 }, /* -107.5db */
+ { 0X00, 0X00, 0X12, 0X00 }, /* -107.0db */
+ { 0X00, 0X00, 0X13, 0X00 }, /* -106.5db */
+ { 0X00, 0X00, 0X15, 0X00 }, /* -106.0db */
+ { 0X00, 0X00, 0X16, 0X00 }, /* -105.5db */
+ { 0X00, 0X00, 0X17, 0X00 }, /* -105.0db */
+ { 0X00, 0X00, 0X18, 0X00 }, /* -104.5db */
+ { 0X00, 0X00, 0X1A, 0X00 }, /* -104.0db */
+ { 0X00, 0X00, 0X1C, 0X00 }, /* -103.5db */
+ { 0X00, 0X00, 0X1D, 0X00 }, /* -103.0db */
+ { 0X00, 0X00, 0X1F, 0X00 }, /* -102.5db */
+ { 0X00, 0X00, 0X21, 0X00 }, /* -102.0db */
+ { 0X00, 0X00, 0X23, 0X00 }, /* -101.5db */
+ { 0X00, 0X00, 0X25, 0X00 }, /* -101.0db */
+ { 0X00, 0X00, 0X27, 0X00 }, /* -100.5db */
+ { 0X00, 0X00, 0X29, 0X00 }, /* -100.0db */
+ { 0X00, 0X00, 0X2C, 0X00 }, /* -99.5db */
+ { 0X00, 0X00, 0X2F, 0X00 }, /* -99.0db */
+ { 0X00, 0X00, 0X31, 0X00 }, /* -98.5db */
+ { 0X00, 0X00, 0X34, 0X00 }, /* -98.0db */
+ { 0X00, 0X00, 0X37, 0X00 }, /* -97.5db */
+ { 0X00, 0X00, 0X3B, 0X00 }, /* -97.0db */
+ { 0X00, 0X00, 0X3E, 0X00 }, /* -96.5db */
+ { 0X00, 0X00, 0X42, 0X00 }, /* -96.0db */
+ { 0X00, 0X00, 0X46, 0X00 }, /* -95.5db */
+ { 0X00, 0X00, 0X4A, 0X00 }, /* -95.0db */
+ { 0X00, 0X00, 0X4F, 0X00 }, /* -94.5db */
+ { 0X00, 0X00, 0X53, 0X00 }, /* -94.0db */
+ { 0X00, 0X00, 0X58, 0X00 }, /* -93.5db */
+ { 0X00, 0X00, 0X5D, 0X00 }, /* -93.0db */
+ { 0X00, 0X00, 0X63, 0X00 }, /* -92.5db */
+ { 0X00, 0X00, 0X69, 0X00 }, /* -92.0db */
+ { 0X00, 0X00, 0X6F, 0X00 }, /* -91.5db */
+ { 0X00, 0X00, 0X76, 0X00 }, /* -91.0db */
+ { 0X00, 0X00, 0X7D, 0X00 }, /* -90.5db */
+ { 0X00, 0X00, 0X84, 0X00 }, /* -90.0db */
+ { 0X00, 0X00, 0X8C, 0X00 }, /* -89.5db */
+ { 0X00, 0X00, 0X94, 0X00 }, /* -89.0db */
+ { 0X00, 0X00, 0X9D, 0X00 }, /* -88.5db */
+ { 0X00, 0X00, 0XA6, 0X00 }, /* -88.0db */
+ { 0X00, 0X00, 0XB0, 0X00 }, /* -87.5db */
+ { 0X00, 0X00, 0XBB, 0X00 }, /* -87.0db */
+ { 0X00, 0X00, 0XC6, 0X00 }, /* -86.5db */
+ { 0X00, 0X00, 0XD2, 0X00 }, /* -86.0db */
+ { 0X00, 0X00, 0XDE, 0X00 }, /* -85.5db */
+ { 0X00, 0X00, 0XEB, 0X00 }, /* -85.0db */
+ { 0X00, 0X00, 0XF9, 0X00 }, /* -84.5db */
+ { 0X00, 0X01, 0X08, 0X00 }, /* -84.0db */
+ { 0X00, 0X01, 0X18, 0X00 }, /* -83.5db */
+ { 0X00, 0X01, 0X28, 0X00 }, /* -83.0db */
+ { 0X00, 0X01, 0X3A, 0X00 }, /* -82.5db */
+ { 0X00, 0X01, 0X4D, 0X00 }, /* -82.0db */
+ { 0X00, 0X01, 0X60, 0X00 }, /* -81.5db */
+ { 0X00, 0X01, 0X75, 0X00 }, /* -81.0db */
+ { 0X00, 0X01, 0X8B, 0X00 }, /* -80.5db */
+ { 0X00, 0X01, 0XA3, 0X00 }, /* -80.0db */
+ { 0X00, 0X01, 0XBC, 0X00 }, /* -79.5db */
+ { 0X00, 0X01, 0XD6, 0X00 }, /* -79.0db */
+ { 0X00, 0X01, 0XF2, 0X00 }, /* -78.5db */
+ { 0X00, 0X02, 0X10, 0X00 }, /* -78.0db */
+ { 0X00, 0X02, 0X2F, 0X00 }, /* -77.5db */
+ { 0X00, 0X02, 0X50, 0X00 }, /* -77.0db */
+ { 0X00, 0X02, 0X73, 0X00 }, /* -76.5db */
+ { 0X00, 0X02, 0X98, 0X00 }, /* -76.0db */
+ { 0X00, 0X02, 0XC0, 0X00 }, /* -75.5db */
+ { 0X00, 0X02, 0XE9, 0X00 }, /* -75.0db */
+ { 0X00, 0X03, 0X16, 0X00 }, /* -74.5db */
+ { 0X00, 0X03, 0X44, 0X00 }, /* -74.0db */
+ { 0X00, 0X03, 0X76, 0X00 }, /* -73.5db */
+ { 0X00, 0X03, 0XAA, 0X00 }, /* -73.0db */
+ { 0X00, 0X03, 0XE2, 0X00 }, /* -72.5db */
+ { 0X00, 0X04, 0X1D, 0X00 }, /* -72.0db */
+ { 0X00, 0X04, 0X5B, 0X00 }, /* -71.5db */
+ { 0X00, 0X04, 0X9E, 0X00 }, /* -71.0db */
+ { 0X00, 0X04, 0XE4, 0X00 }, /* -70.5db */
+ { 0X00, 0X05, 0X2E, 0X00 }, /* -70.0db */
+ { 0X00, 0X05, 0X7C, 0X00 }, /* -69.5db */
+ { 0X00, 0X05, 0XD0, 0X00 }, /* -69.0db */
+ { 0X00, 0X06, 0X28, 0X00 }, /* -68.5db */
+ { 0X00, 0X06, 0X85, 0X00 }, /* -68.0db */
+ { 0X00, 0X06, 0XE8, 0X00 }, /* -67.5db */
+ { 0X00, 0X07, 0X51, 0X00 }, /* -67.0db */
+ { 0X00, 0X07, 0XC0, 0X00 }, /* -66.5db */
+ { 0X00, 0X08, 0X36, 0X00 }, /* -66.0db */
+ { 0X00, 0X08, 0XB2, 0X00 }, /* -65.5db */
+ { 0X00, 0X09, 0X36, 0X00 }, /* -65.0db */
+ { 0X00, 0X09, 0XC2, 0X00 }, /* -64.5db */
+ { 0X00, 0X0A, 0X56, 0X00 }, /* -64.0db */
+ { 0X00, 0X0A, 0XF3, 0X00 }, /* -63.5db */
+ { 0X00, 0X0B, 0X99, 0X00 }, /* -63.0db */
+ { 0X00, 0X0C, 0X49, 0X00 }, /* -62.5db */
+ { 0X00, 0X0D, 0X03, 0X00 }, /* -62.0db */
+ { 0X00, 0X0D, 0XC9, 0X00 }, /* -61.5db */
+ { 0X00, 0X0E, 0X9A, 0X00 }, /* -61.0db */
+ { 0X00, 0X0F, 0X77, 0X00 }, /* -60.5db */
+ { 0X00, 0X10, 0X62, 0X00 }, /* -60.0db */
+ { 0X00, 0X11, 0X5A, 0X00 }, /* -59.5db */
+ { 0X00, 0X12, 0X62, 0X00 }, /* -59.0db */
+ { 0X00, 0X13, 0X78, 0X00 }, /* -58.5db */
+ { 0X00, 0X14, 0XA0, 0X00 }, /* -58.0db */
+ { 0X00, 0X15, 0XD9, 0X00 }, /* -57.5db */
+ { 0X00, 0X17, 0X24, 0X00 }, /* -57.0db */
+ { 0X00, 0X18, 0X83, 0X00 }, /* -56.5db */
+ { 0X00, 0X19, 0XF7, 0X00 }, /* -56.0db */
+ { 0X00, 0X1B, 0X81, 0X00 }, /* -55.5db */
+ { 0X00, 0X1D, 0X22, 0X00 }, /* -55.0db */
+ { 0X00, 0X1E, 0XDC, 0X00 }, /* -54.5db */
+ { 0X00, 0X20, 0XB0, 0X00 }, /* -54.0db */
+ { 0X00, 0X22, 0XA0, 0X00 }, /* -53.5db */
+ { 0X00, 0X24, 0XAD, 0X00 }, /* -53.0db */
+ { 0X00, 0X26, 0XDA, 0X00 }, /* -52.5db */
+ { 0X00, 0X29, 0X27, 0X00 }, /* -52.0db */
+ { 0X00, 0X2B, 0X97, 0X00 }, /* -51.5db */
+ { 0X00, 0X2E, 0X2D, 0X00 }, /* -51.0db */
+ { 0X00, 0X30, 0XE9, 0X00 }, /* -50.5db */
+ { 0X00, 0X33, 0XCF, 0X00 }, /* -50.0db */
+ { 0X00, 0X36, 0XE1, 0X00 }, /* -49.5db */
+ { 0X00, 0X3A, 0X21, 0X00 }, /* -49.0db */
+ { 0X00, 0X3D, 0X93, 0X00 }, /* -48.5db */
+ { 0X00, 0X41, 0X39, 0X00 }, /* -48.0db */
+ { 0X00, 0X45, 0X17, 0X00 }, /* -47.5db */
+ { 0X00, 0X49, 0X2F, 0X00 }, /* -47.0db */
+ { 0X00, 0X4D, 0X85, 0X00 }, /* -46.5db */
+ { 0X00, 0X52, 0X1D, 0X00 }, /* -46.0db */
+ { 0X00, 0X56, 0XFA, 0X00 }, /* -45.5db */
+ { 0X00, 0X5C, 0X22, 0X00 }, /* -45.0db */
+ { 0X00, 0X61, 0X97, 0X00 }, /* -44.5db */
+ { 0X00, 0X67, 0X60, 0X00 }, /* -44.0db */
+ { 0X00, 0X6D, 0X80, 0X00 }, /* -43.5db */
+ { 0X00, 0X73, 0XFD, 0X00 }, /* -43.0db */
+ { 0X00, 0X7A, 0XDC, 0X00 }, /* -42.5db */
+ { 0X00, 0X82, 0X24, 0X00 }, /* -42.0db */
+ { 0X00, 0X89, 0XDA, 0X00 }, /* -41.5db */
+ { 0X00, 0X92, 0X05, 0X00 }, /* -41.0db */
+ { 0X00, 0X9A, 0XAC, 0X00 }, /* -40.5db */
+ { 0X00, 0XA3, 0XD7, 0X00 }, /* -40.0db */
+ { 0X00, 0XAD, 0X8C, 0X00 }, /* -39.5db */
+ { 0X00, 0XB7, 0XD4, 0X00 }, /* -39.0db */
+ { 0X00, 0XC2, 0XB9, 0X00 }, /* -38.5db */
+ { 0X00, 0XCE, 0X43, 0X00 }, /* -38.0db */
+ { 0X00, 0XDA, 0X7B, 0X00 }, /* -37.5db */
+ { 0X00, 0XE7, 0X6E, 0X00 }, /* -37.0db */
+ { 0X00, 0XF5, 0X24, 0X00 }, /* -36.5db */
+ { 0X01, 0X03, 0XAB, 0X00 }, /* -36.0db */
+ { 0X01, 0X13, 0X0E, 0X00 }, /* -35.5db */
+ { 0X01, 0X23, 0X5A, 0X00 }, /* -35.0db */
+ { 0X01, 0X34, 0X9D, 0X00 }, /* -34.5db */
+ { 0X01, 0X46, 0XE7, 0X00 }, /* -34.0db */
+ { 0X01, 0X5A, 0X46, 0X00 }, /* -33.5db */
+ { 0X01, 0X6E, 0XCA, 0X00 }, /* -33.0db */
+ { 0X01, 0X84, 0X86, 0X00 }, /* -32.5db */
+ { 0X01, 0X9B, 0X8C, 0X00 }, /* -32.0db */
+ { 0X01, 0XB3, 0XEE, 0X00 }, /* -31.5db */
+ { 0X01, 0XCD, 0XC3, 0X00 }, /* -31.0db */
+ { 0X01, 0XE9, 0X20, 0X00 }, /* -30.5db */
+ { 0X02, 0X06, 0X1B, 0X00 }, /* -30.0db */
+ { 0X02, 0X24, 0XCE, 0X00 }, /* -29.5db */
+ { 0X02, 0X45, 0X53, 0X00 }, /* -29.0db */
+ { 0X02, 0X67, 0XC5, 0X00 }, /* -28.5db */
+ { 0X02, 0X8C, 0X42, 0X00 }, /* -28.0db */
+ { 0X02, 0XB2, 0XE8, 0X00 }, /* -27.5db */
+ { 0X02, 0XDB, 0XD8, 0X00 }, /* -27.0db */
+ { 0X03, 0X07, 0X36, 0X00 }, /* -26.5db */
+ { 0X03, 0X35, 0X25, 0X00 }, /* -26.0db */
+ { 0X03, 0X65, 0XCD, 0X00 }, /* -25.5db */
+ { 0X03, 0X99, 0X57, 0X00 }, /* -25.0db */
+ { 0X03, 0XCF, 0XEE, 0X00 }, /* -24.5db */
+ { 0X04, 0X09, 0XC2, 0X00 }, /* -24.0db */
+ { 0X04, 0X47, 0X03, 0X00 }, /* -23.5db */
+ { 0X04, 0X87, 0XE5, 0X00 }, /* -23.0db */
+ { 0X04, 0XCC, 0XA0, 0X00 }, /* -22.5db */
+ { 0X05, 0X15, 0X6D, 0X00 }, /* -22.0db */
+ { 0X05, 0X62, 0X8A, 0X00 }, /* -21.5db */
+ { 0X05, 0XB4, 0X39, 0X00 }, /* -21.0db */
+ { 0X06, 0X0A, 0XBF, 0X00 }, /* -20.5db */
+ { 0X06, 0X66, 0X66, 0X00 }, /* -20.0db */
+ { 0X06, 0XC7, 0X7B, 0X00 }, /* -19.5db */
+ { 0X07, 0X2E, 0X50, 0X00 }, /* -19.0db */
+ { 0X07, 0X9B, 0X3D, 0X00 }, /* -18.5db */
+ { 0X08, 0X0E, 0X9F, 0X00 }, /* -18.0db */
+ { 0X08, 0X88, 0XD7, 0X00 }, /* -17.5db */
+ { 0X09, 0X0A, 0X4D, 0X00 }, /* -17.0db */
+ { 0X09, 0X93, 0X6E, 0X00 }, /* -16.5db */
+ { 0X0A, 0X24, 0XB0, 0X00 }, /* -16.0db */
+ { 0X0A, 0XBE, 0X8D, 0X00 }, /* -15.5db */
+ { 0X0B, 0X61, 0X88, 0X00 }, /* -15.0db */
+ { 0X0C, 0X0E, 0X2B, 0X00 }, /* -14.5db */
+ { 0X0C, 0XC5, 0X09, 0X00 }, /* -14.0db */
+ { 0X0D, 0X86, 0XBD, 0X00 }, /* -13.5db */
+ { 0X0E, 0X53, 0XEB, 0X00 }, /* -13.0db */
+ { 0X0F, 0X2D, 0X42, 0X00 }, /* -12.5db */
+ { 0X10, 0X13, 0X79, 0X00 }, /* -12.0db */
+ { 0X11, 0X07, 0X54, 0X00 }, /* -11.5db */
+ { 0X12, 0X09, 0XA3, 0X00 }, /* -11.0db */
+ { 0X13, 0X1B, 0X40, 0X00 }, /* -10.5db */
+ { 0X14, 0X3D, 0X13, 0X00 }, /* -10.0db */
+ { 0X15, 0X70, 0X12, 0X00 }, /* -9.5db */
+ { 0X16, 0XB5, 0X43, 0X00 }, /* -9.0db */
+ { 0X18, 0X0D, 0XB8, 0X00 }, /* -8.5db */
+ { 0X19, 0X7A, 0X96, 0X00 }, /* -8.0db */
+ { 0X1A, 0XFD, 0X13, 0X00 }, /* -7.5db */
+ { 0X1C, 0X96, 0X76, 0X00 }, /* -7.0db */
+ { 0X1E, 0X48, 0X1C, 0X00 }, /* -6.5db */
+ { 0X20, 0X13, 0X73, 0X00 }, /* -6.0db */
+ { 0X21, 0XFA, 0X02, 0X00 }, /* -5.5db */
+ { 0X23, 0XFD, 0X66, 0X00 }, /* -5.0db */
+ { 0X26, 0X1F, 0X54, 0X00 }, /* -4.5db */
+ { 0X28, 0X61, 0X9A, 0X00 }, /* -4.0db */
+ { 0X2A, 0XC6, 0X25, 0X00 }, /* -3.5db */
+ { 0X2D, 0X4E, 0XFB, 0X00 }, /* -3.0db */
+ { 0X2F, 0XFE, 0X44, 0X00 }, /* -2.5db */
+ { 0X32, 0XD6, 0X46, 0X00 }, /* -2.0db */
+ { 0X35, 0XD9, 0X6B, 0X00 }, /* -1.5db */
+ { 0X39, 0X0A, 0X41, 0X00 }, /* -1.0db */
+ { 0X3C, 0X6B, 0X7E, 0X00 }, /* -0.5db */
+ { 0X40, 0X00, 0X00, 0X00 }, /* 0.0db */
+ { 0X43, 0XCA, 0XD0, 0X00 }, /* 0.5db */
+ { 0X47, 0XCF, 0X26, 0X00 }, /* 1.0db */
+ { 0X4C, 0X10, 0X6B, 0X00 }, /* 1.5db */
+ { 0X50, 0X92, 0X3B, 0X00 }, /* 2.0db */
+ { 0X55, 0X58, 0X6A, 0X00 }, /* 2.5db */
+ { 0X5A, 0X67, 0X03, 0X00 }, /* 3.0db */
+ { 0X5F, 0XC2, 0X53, 0X00 }, /* 3.5db */
+ { 0X65, 0X6E, 0XE3, 0X00 }, /* 4.0db */
+ { 0X6B, 0X71, 0X86, 0X00 }, /* 4.5db */
+ { 0X71, 0XCF, 0X54, 0X00 }, /* 5.0db */
+ { 0X78, 0X8D, 0XB4, 0X00 }, /* 5.5db */
+ { 0X7F, 0XB2, 0X61, 0X00 }, /* 6.0db */
+};
+#endif
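
Each row stores round(pow(10, dB / 20) * pow(2, 22)) in its first three bytes, big-endian, with the fourth byte zero. A userspace sketch that regenerates a row (it reproduces e.g. the 0.0 dB entry, 0x400000 -> 40 00 00 00, and the -110.0 dB entry, 13 -> 00 00 0D 00):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	double db = 0.0;	/* pick any 0.5 dB step from -110.0 to +6.0 */
    	uint32_t v = (uint32_t)lround(pow(10.0, db / 20.0) * (1 << 22));

    	printf("{ 0X%02X, 0X%02X, 0X%02X, 0X00 }, /* %.1fdb */\n",
    	       (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff, db);
    	return 0;
    }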
diff --git a/include/sound/tas5825-tlv.h b/include/sound/tas5825-tlv.h
new file mode 100644
index 000000000000..95f2d3fad120
--- /dev/null
+++ b/include/sound/tas5825-tlv.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+//
+// ALSA SoC Texas Instruments TAS5825 Audio Smart Amplifier
+//
+// Copyright (C) 2025 Texas Instruments Incorporated
+// https://www.ti.com
+//
+// The TAS5825 HDA driver supports one or two TAS5825 chips.
+//
+// Author: Baojun Xu <baojun.xu@ti.com>
+//
+
+#ifndef __TAS5825_TLV_H__
+#define __TAS5825_TLV_H__
+
+#define TAS5825_DVC_LEVEL TASDEVICE_REG(0x0, 0x0, 0x4c)
+#define TAS5825_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x54)
+
+static const __maybe_unused DECLARE_TLV_DB_SCALE(
+ tas5825_dvc_tlv, -10300, 50, 0);
+static const __maybe_unused DECLARE_TLV_DB_SCALE(
+ tas5825_amp_tlv, -1550, 50, 0);
+
+#endif
diff --git a/include/sound/tlv320dac33-plat.h b/include/sound/tlv320dac33-plat.h
deleted file mode 100644
index 7a7249a896e3..000000000000
--- a/include/sound/tlv320dac33-plat.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Platform header for Texas Instruments TLV320DAC33 codec driver
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * Copyright: (C) 2009 Nokia Corporation
- */
-
-#ifndef __TLV320DAC33_PLAT_H
-#define __TLV320DAC33_PLAT_H
-
-struct tlv320dac33_platform_data {
- int power_gpio;
- int mode1_latency; /* latency caused by the i2c writes in us */
- int auto_fifo_config; /* FIFO config based on the period size */
- int keep_bclk; /* Keep the BCLK running in FIFO modes */
- u8 burst_bclkdiv;
-};
-
-#endif /* __TLV320DAC33_PLAT_H */
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index 383c09f583ac..37195edf2498 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -38,25 +38,32 @@ TRACE_EVENT(cma_release,
TRACE_EVENT(cma_alloc_start,
- TP_PROTO(const char *name, unsigned long count, unsigned int align),
+ TP_PROTO(const char *name, unsigned long request_count, unsigned long available_count,
+ unsigned long total_count, unsigned int align),
- TP_ARGS(name, count, align),
+ TP_ARGS(name, request_count, available_count, total_count, align),
TP_STRUCT__entry(
__string(name, name)
- __field(unsigned long, count)
+ __field(unsigned long, request_count)
+ __field(unsigned long, available_count)
+ __field(unsigned long, total_count)
__field(unsigned int, align)
),
TP_fast_assign(
__assign_str(name);
- __entry->count = count;
+ __entry->request_count = request_count;
+ __entry->available_count = available_count;
+ __entry->total_count = total_count;
__entry->align = align;
),
- TP_printk("name=%s count=%lu align=%u",
+ TP_printk("name=%s request_count=%lu available_count=%lu total_count=%lu align=%u",
__get_str(name),
- __entry->count,
+ __entry->request_count,
+ __entry->available_count,
+ __entry->total_count,
__entry->align)
);
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index d8ddc27b6a7c..5da59fd8121d 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -31,7 +31,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
{ DMA_ATTR_FORCE_CONTIGUOUS, "FORCE_CONTIGUOUS" }, \
{ DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
{ DMA_ATTR_NO_WARN, "NO_WARN" }, \
- { DMA_ATTR_PRIVILEGED, "PRIVILEGED" })
+ { DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
+ { DMA_ATTR_MMIO, "MMIO" })
DECLARE_EVENT_CLASS(dma_map,
TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
@@ -71,8 +72,7 @@ DEFINE_EVENT(dma_map, name, \
size_t size, enum dma_data_direction dir, unsigned long attrs), \
TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs))
-DEFINE_MAP_EVENT(dma_map_page);
-DEFINE_MAP_EVENT(dma_map_resource);
+DEFINE_MAP_EVENT(dma_map_phys);
DECLARE_EVENT_CLASS(dma_unmap,
TP_PROTO(struct device *dev, dma_addr_t addr, size_t size,
@@ -109,8 +109,7 @@ DEFINE_EVENT(dma_unmap, name, \
enum dma_data_direction dir, unsigned long attrs), \
TP_ARGS(dev, addr, size, dir, attrs))
-DEFINE_UNMAP_EVENT(dma_unmap_page);
-DEFINE_UNMAP_EVENT(dma_unmap_resource);
+DEFINE_UNMAP_EVENT(dma_unmap_phys);
DECLARE_EVENT_CLASS(dma_alloc_class,
TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 20b914250ce9..feb28b359eff 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -7,6 +7,8 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <net/flow.h>
+#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <linux/tracepoint.h>
@@ -44,7 +46,7 @@ TRACE_EVENT(fib_table_lookup,
__entry->err = err;
__entry->oif = flp->flowi4_oif;
__entry->iif = flp->flowi4_iif;
- __entry->tos = flp->flowi4_tos;
+ __entry->tos = inet_dscp_to_dsfield(flp->flowi4_dscp);
__entry->scope = flp->flowi4_scope;
__entry->flags = flp->flowi4_flags;
diff --git a/include/trace/events/habanalabs.h b/include/trace/events/habanalabs.h
index 4a2bb2c896d1..fa0d2c6bace4 100644
--- a/include/trace/events/habanalabs.h
+++ b/include/trace/events/habanalabs.h
@@ -145,7 +145,7 @@ DECLARE_EVENT_CLASS(habanalabs_comms_template,
__entry->op_str = op_str;
),
- TP_printk("%s: cms: %s",
+ TP_printk("%s: cmd: %s",
__get_str(dname),
__entry->op_str)
);
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 2305df6cb485..dd94d14a2427 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -19,7 +19,6 @@
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
- EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
EM( SCAN_SCAN_ABORT, "scan_aborted") \
@@ -55,15 +54,14 @@ SCAN_STATUS
TRACE_EVENT(mm_khugepaged_scan_pmd,
- TP_PROTO(struct mm_struct *mm, struct folio *folio, bool writable,
+ TP_PROTO(struct mm_struct *mm, struct folio *folio,
int referenced, int none_or_zero, int status, int unmapped),
- TP_ARGS(mm, folio, writable, referenced, none_or_zero, status, unmapped),
+ TP_ARGS(mm, folio, referenced, none_or_zero, status, unmapped),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
__field(unsigned long, pfn)
- __field(bool, writable)
__field(int, referenced)
__field(int, none_or_zero)
__field(int, status)
@@ -73,17 +71,15 @@ TRACE_EVENT(mm_khugepaged_scan_pmd,
TP_fast_assign(
__entry->mm = mm;
__entry->pfn = folio ? folio_pfn(folio) : -1;
- __entry->writable = writable;
__entry->referenced = referenced;
__entry->none_or_zero = none_or_zero;
__entry->status = status;
__entry->unmapped = unmapped;
),
- TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
+ TP_printk("mm=%p, scan_pfn=0x%lx, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d",
__entry->mm,
__entry->pfn,
- __entry->writable,
__entry->referenced,
__entry->none_or_zero,
__print_symbolic(__entry->status, SCAN_STATUS),
@@ -117,15 +113,14 @@ TRACE_EVENT(mm_collapse_huge_page,
TRACE_EVENT(mm_collapse_huge_page_isolate,
TP_PROTO(struct folio *folio, int none_or_zero,
- int referenced, bool writable, int status),
+ int referenced, int status),
- TP_ARGS(folio, none_or_zero, referenced, writable, status),
+ TP_ARGS(folio, none_or_zero, referenced, status),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(int, none_or_zero)
__field(int, referenced)
- __field(bool, writable)
__field(int, status)
),
@@ -133,15 +128,13 @@ TRACE_EVENT(mm_collapse_huge_page_isolate,
__entry->pfn = folio ? folio_pfn(folio) : -1;
__entry->none_or_zero = none_or_zero;
__entry->referenced = referenced;
- __entry->writable = writable;
__entry->status = status;
),
- TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, writable=%d, status=%s",
+ TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, status=%s",
__entry->pfn,
__entry->none_or_zero,
__entry->referenced,
- __entry->writable,
__print_symbolic(__entry->status, SCAN_STATUS))
);
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 178ab6f611be..45d15460b495 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -340,8 +340,8 @@ TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe),
__entry->user_data = cqe->user_data;
__entry->res = cqe->res;
__entry->cflags = cqe->flags;
- __entry->extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0;
- __entry->extra2 = io_ctx_cqe32(ctx) ? cqe->big_cqe[1] : 0;
+ __entry->extra1 = ctx->flags & IORING_SETUP_CQE32 || cqe->flags & IORING_CQE_F_32 ? cqe->big_cqe[0] : 0;
+ __entry->extra2 = ctx->flags & IORING_SETUP_CQE32 || cqe->flags & IORING_CQE_F_32 ? cqe->big_cqe[1] : 0;
),
TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x "
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 474358773abe..7f93e754da5c 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -22,6 +22,7 @@ TRACE_EVENT(kmem_cache_alloc,
TP_STRUCT__entry(
__field( unsigned long, call_site )
__field( const void *, ptr )
+ __string( name, s->name )
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
@@ -32,6 +33,7 @@ TRACE_EVENT(kmem_cache_alloc,
TP_fast_assign(
__entry->call_site = call_site;
__entry->ptr = ptr;
+ __assign_str(name);
__entry->bytes_req = s->object_size;
__entry->bytes_alloc = s->size;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
@@ -41,9 +43,10 @@ TRACE_EVENT(kmem_cache_alloc,
(s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
+ TP_printk("call_site=%pS ptr=%p name=%s bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
+ __get_str(name),
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
diff --git a/include/trace/events/page_ref.h b/include/trace/events/page_ref.h
index fe33a255b7d0..ea6b5c4baf3d 100644
--- a/include/trace/events/page_ref.h
+++ b/include/trace/events/page_ref.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
__entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
@@ -77,7 +77,7 @@ DECLARE_EVENT_CLASS(page_ref_mod_and_test_template,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->flags = page->flags;
+ __entry->flags = page->flags.f;
__entry->count = page_ref_count(page);
__entry->mapcount = atomic_read(&page->_mapcount);
__entry->mapping = page->mapping;
diff --git a/include/trace/events/readahead.h b/include/trace/events/readahead.h
new file mode 100644
index 000000000000..0997ac5eceab
--- /dev/null
+++ b/include/trace/events/readahead.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM readahead
+
+#if !defined(_TRACE_FILEMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_READAHEAD_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+TRACE_EVENT(page_cache_ra_unbounded,
+ TP_PROTO(struct inode *inode, pgoff_t index, unsigned long nr_to_read,
+ unsigned long lookahead_size),
+
+ TP_ARGS(inode, index, nr_to_read, lookahead_size),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned long, nr_to_read)
+ __field(unsigned long, lookahead_size)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->nr_to_read = nr_to_read;
+ __entry->lookahead_size = lookahead_size;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu nr_to_read=%lu lookahead_size=%lu",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->nr_to_read, __entry->lookahead_size
+ )
+);
+
+TRACE_EVENT(page_cache_ra_order,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra),
+
+ TP_ARGS(inode, index, ra),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned int, order)
+ __field(unsigned int, size)
+ __field(unsigned int, async_size)
+ __field(unsigned int, ra_pages)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->order = ra->order;
+ __entry->size = ra->size;
+ __entry->async_size = ra->async_size;
+ __entry->ra_pages = ra->ra_pages;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu order=%u size=%u async_size=%u ra_pages=%u",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->order, __entry->size,
+ __entry->async_size, __entry->ra_pages
+ )
+);
+
+DECLARE_EVENT_CLASS(page_cache_ra_op,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+
+ TP_ARGS(inode, index, ra, req_count),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(pgoff_t, index)
+ __field(unsigned int, order)
+ __field(unsigned int, size)
+ __field(unsigned int, async_size)
+ __field(unsigned int, ra_pages)
+ __field(unsigned int, mmap_miss)
+ __field(loff_t, prev_pos)
+ __field(unsigned long, req_count)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = inode->i_ino;
+ __entry->s_dev = inode->i_sb->s_dev;
+ __entry->index = index;
+ __entry->order = ra->order;
+ __entry->size = ra->size;
+ __entry->async_size = ra->async_size;
+ __entry->ra_pages = ra->ra_pages;
+ __entry->mmap_miss = ra->mmap_miss;
+ __entry->prev_pos = ra->prev_pos;
+ __entry->req_count = req_count;
+ ),
+
+ TP_printk(
+ "dev=%d:%d ino=%lx index=%lu req_count=%lu order=%u size=%u async_size=%u ra_pages=%u mmap_miss=%u prev_pos=%lld",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev), __entry->i_ino,
+ __entry->index, __entry->req_count, __entry->order,
+ __entry->size, __entry->async_size, __entry->ra_pages,
+ __entry->mmap_miss, __entry->prev_pos
+ )
+);
+
+DEFINE_EVENT(page_cache_ra_op, page_cache_sync_ra,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+ TP_ARGS(inode, index, ra, req_count)
+);
+
+DEFINE_EVENT(page_cache_ra_op, page_cache_async_ra,
+ TP_PROTO(struct inode *inode, pgoff_t index, struct file_ra_state *ra,
+ unsigned long req_count),
+ TP_ARGS(inode, index, ra, req_count)
+);
+
+#endif /* _TRACE_FILEMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
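
Once the kernel exposes the new trace system, the events appear under events/readahead/ in tracefs. A userspace sketch to switch them on (the tracefs mount point is an assumption):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	int fd = open("/sys/kernel/tracing/events/readahead/enable",
    		      O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	if (write(fd, "1", 1) != 1)	/* "0" disables again */
    		perror("write");
    	close(fd);
    	return 0;
    }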
diff --git a/include/trace/misc/fs.h b/include/trace/misc/fs.h
index 0406ebe2a80a..7ead1c61f0cb 100644
--- a/include/trace/misc/fs.h
+++ b/include/trace/misc/fs.h
@@ -141,3 +141,25 @@
{ ATTR_TIMES_SET, "TIMES_SET" }, \
{ ATTR_TOUCH, "TOUCH"}, \
{ ATTR_DELEG, "DELEG"})
+
+#define show_statx_mask(flags) \
+ __print_flags(flags, "|", \
+ { STATX_TYPE, "TYPE" }, \
+ { STATX_MODE, "MODE" }, \
+ { STATX_NLINK, "NLINK" }, \
+ { STATX_UID, "UID" }, \
+ { STATX_GID, "GID" }, \
+ { STATX_ATIME, "ATIME" }, \
+ { STATX_MTIME, "MTIME" }, \
+ { STATX_CTIME, "CTIME" }, \
+ { STATX_INO, "INO" }, \
+ { STATX_SIZE, "SIZE" }, \
+ { STATX_BLOCKS, "BLOCKS" }, \
+ { STATX_BASIC_STATS, "BASIC_STATS" }, \
+ { STATX_BTIME, "BTIME" }, \
+ { STATX_MNT_ID, "MNT_ID" }, \
+ { STATX_DIOALIGN, "DIOALIGN" }, \
+ { STATX_MNT_ID_UNIQUE, "MNT_ID_UNIQUE" }, \
+ { STATX_SUBVOL, "SUBVOL" }, \
+ { STATX_WRITE_ATOMIC, "WRITE_ATOMIC" }, \
+ { STATX_DIO_READ_ALIGN, "DIO_READ_ALIGN" })
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index bdedbaccf776..cd7402e36b6d 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -57,6 +57,7 @@ extern "C" {
#define DRM_AMDGPU_USERQ 0x16
#define DRM_AMDGPU_USERQ_SIGNAL 0x17
#define DRM_AMDGPU_USERQ_WAIT 0x18
+#define DRM_AMDGPU_GEM_LIST_HANDLES 0x19
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -77,6 +78,7 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_USERQ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ, union drm_amdgpu_userq)
#define DRM_IOCTL_AMDGPU_USERQ_SIGNAL DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_SIGNAL, struct drm_amdgpu_userq_signal)
#define DRM_IOCTL_AMDGPU_USERQ_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_USERQ_WAIT, struct drm_amdgpu_userq_wait)
+#define DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_LIST_HANDLES, struct drm_amdgpu_gem_list_handles)
/**
* DOC: memory domains
@@ -103,6 +105,8 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
+ *
+ * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing).
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -111,13 +115,15 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
+#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA | \
- AMDGPU_GEM_DOMAIN_DOORBELL)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL | \
+ AMDGPU_GEM_DOMAIN_MMIO_REMAP)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -800,6 +806,21 @@ union drm_amdgpu_wait_fences {
#define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0
#define AMDGPU_GEM_OP_SET_PLACEMENT 1
+#define AMDGPU_GEM_OP_GET_MAPPING_INFO 2
+
+struct drm_amdgpu_gem_vm_entry {
+ /* Start of mapping (in bytes) */
+ __u64 addr;
+
+ /* Size of mapping (in bytes) */
+ __u64 size;
+
+ /* Mapping offset */
+ __u64 offset;
+
+ /* flags needed to recreate mapping */
+ __u64 flags;
+};
/* Sets or returns a value associated with a buffer. */
struct drm_amdgpu_gem_op {
@@ -807,8 +828,44 @@ struct drm_amdgpu_gem_op {
__u32 handle;
/** AMDGPU_GEM_OP_* */
__u32 op;
- /** Input or return value */
+ /** Input or return value. For MAPPING_INFO op: pointer to array of struct drm_amdgpu_gem_vm_entry */
__u64 value;
+ /** For MAPPING_INFO op: number of mappings (in/out) */
+ __u32 num_entries;
+
+ __u32 padding;
+};
+
+#define AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT (1 << 0)
+
+struct drm_amdgpu_gem_list_handles {
+ /* User pointer to array of drm_amdgpu_gem_bo_info_entry */
+ __u64 entries;
+
+ /* In: size of the entries buffer; out: number of handles in the process (if larger than the buffer size, the caller must retry) */
+ __u32 num_entries;
+
+ __u32 padding;
+};
+
+struct drm_amdgpu_gem_list_handles_entry {
+ /* gem handle of buffer object */
+ __u32 gem_handle;
+
+ /* Currently just one flag: IS_IMPORT */
+ __u32 flags;
+
+ /* Size of bo */
+ __u64 size;
+
+ /* Preferred domains for GEM_CREATE */
+ __u64 preferred_domains;
+
+ /* GEM_CREATE flags for re-creation of buffer */
+ __u64 alloc_flags;
+
+ /* physical start_addr alignment in bytes for some HW requirements */
+ __u64 alignment;
};
#define AMDGPU_VA_OP_MAP 1
@@ -1031,10 +1088,11 @@ struct drm_amdgpu_cs_chunk_cp_gfx_shadow {
* Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU
*
*/
-#define AMDGPU_IDS_FLAGS_FUSION 0x1
-#define AMDGPU_IDS_FLAGS_PREEMPTION 0x2
-#define AMDGPU_IDS_FLAGS_TMZ 0x4
-#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8
+#define AMDGPU_IDS_FLAGS_FUSION 0x01
+#define AMDGPU_IDS_FLAGS_PREEMPTION 0x02
+#define AMDGPU_IDS_FLAGS_TMZ 0x04
+#define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x08
+#define AMDGPU_IDS_FLAGS_GANG_SUBMIT 0x10
/*
* Query h/w info: Flag identifying VF/PF/PT mode
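
A userspace sketch of the DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES retry protocol implied by the num_entries in/out comment; the helper name and starting buffer size are assumptions:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include "amdgpu_drm.h"

    static int foo_list_handles(int fd)
    {
    	struct drm_amdgpu_gem_list_handles args = {0};
    	struct drm_amdgpu_gem_list_handles_entry *buf;
    	uint32_t room = 16;

    	for (;;) {
    		buf = calloc(room, sizeof(*buf));
    		if (!buf)
    			return -1;
    		args.entries = (uintptr_t)buf;
    		args.num_entries = room;
    		if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args)) {
    			free(buf);
    			return -1;
    		}
    		if (args.num_entries <= room)
    			break;			/* buf holds every entry */
    		room = args.num_entries;	/* too small: retry */
    		free(buf);
    	}

    	for (uint32_t i = 0; i < args.num_entries; i++)
    		printf("handle %u, size %llu\n", buf[i].gem_handle,
    		       (unsigned long long)buf[i].size);
    	free(buf);
    	return 0;
    }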
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index a706ead39082..a1fb9785db77 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -34,6 +34,7 @@ enum amdxdna_drm_ioctl_id {
DRM_AMDXDNA_EXEC_CMD,
DRM_AMDXDNA_GET_INFO,
DRM_AMDXDNA_SET_STATE,
+ DRM_AMDXDNA_GET_ARRAY = 10,
};
/**
@@ -154,6 +155,31 @@ enum amdxdna_bo_type {
};
/**
+ * struct amdxdna_drm_va_entry
+ * @vaddr: Virtual address.
+ * @len: Size of entry.
+ */
+struct amdxdna_drm_va_entry {
+ __u64 vaddr;
+ __u64 len;
+};
+
+/**
+ * struct amdxdna_drm_va_tbl
+ * @dmabuf_fd: The fd of dmabuf.
+ * @num_entries: Number of va entries.
+ * @va_entries: Array of va entries.
+ *
+ * The input can be either a dmabuf fd or a virtual address entry table.
+ * When dmabuf_fd is used, num_entries must be zero.
+ */
+struct amdxdna_drm_va_tbl {
+ __s32 dmabuf_fd;
+ __u32 num_entries;
+ struct amdxdna_drm_va_entry va_entries[];
+};
+
+/**
* struct amdxdna_drm_create_bo - Create a buffer object.
* @flags: Buffer flags. MBZ.
* @vaddr: User VA of buffer if applied. MBZ.
@@ -430,6 +456,112 @@ struct amdxdna_drm_get_info {
__u64 buffer; /* in/out */
};
+#define AMDXDNA_HWCTX_STATE_IDLE 0
+#define AMDXDNA_HWCTX_STATE_ACTIVE 1
+
+/**
+ * struct amdxdna_drm_hwctx_entry - The hardware context array entry
+ */
+struct amdxdna_drm_hwctx_entry {
+ /** @context_id: Context ID. */
+ __u32 context_id;
+ /** @start_col: Start AIE array column assigned to context. */
+ __u32 start_col;
+ /** @num_col: Number of AIE array columns assigned to context. */
+ __u32 num_col;
+ /** @hwctx_id: The real hardware context id. */
+ __u32 hwctx_id;
+ /** @pid: ID of process which created this context. */
+ __s64 pid;
+ /** @command_submissions: Number of commands submitted. */
+ __u64 command_submissions;
+ /** @command_completions: Number of commands completed. */
+ __u64 command_completions;
+ /** @migrations: Number of times been migrated. */
+ __u64 migrations;
+ /** @preemptions: Number of times been preempted. */
+ __u64 preemptions;
+ /** @errors: Number of errors happened. */
+ __u64 errors;
+ /** @priority: Context priority. */
+ __u64 priority;
+ /** @heap_usage: Usage of device heap buffer. */
+ __u64 heap_usage;
+ /** @suspensions: Number of times been suspended. */
+ __u64 suspensions;
+ /**
+ * @state: Context state.
+ * %AMDXDNA_HWCTX_STATE_IDLE
+ * %AMDXDNA_HWCTX_STATE_ACTIVE
+ */
+ __u32 state;
+ /** @pasid: PASID been bound. */
+ __u32 pasid;
+ /** @gops: Giga operations per second. */
+ __u32 gops;
+ /** @fps: Frames per second. */
+ __u32 fps;
+ /** @dma_bandwidth: DMA bandwidth. */
+ __u32 dma_bandwidth;
+ /** @latency: Frame response latency. */
+ __u32 latency;
+ /** @frame_exec_time: Frame execution time. */
+ __u32 frame_exec_time;
+ /** @txn_op_idx: Index of last control code executed. */
+ __u32 txn_op_idx;
+ /** @ctx_pc: Program counter. */
+ __u32 ctx_pc;
+ /** @fatal_error_type: Fatal error type if context crashes. */
+ __u32 fatal_error_type;
+ /** @fatal_error_exception_type: Firmware exception type. */
+ __u32 fatal_error_exception_type;
+ /** @fatal_error_exception_pc: Firmware exception program counter. */
+ __u32 fatal_error_exception_pc;
+ /** @fatal_error_app_module: Exception module name. */
+ __u32 fatal_error_app_module;
+ /** @pad: Structure pad. */
+ __u32 pad;
+};
+
+#define DRM_AMDXDNA_HW_CONTEXT_ALL 0
+
+/**
+ * struct amdxdna_drm_get_array - Get information array.
+ */
+struct amdxdna_drm_get_array {
+ /**
+ * @param:
+ *
+ * Supported params:
+ *
+ * %DRM_AMDXDNA_HW_CONTEXT_ALL:
+ * Returns all created hardware contexts.
+ */
+ __u32 param;
+ /**
+ * @element_size:
+ *
+ * Specifies maximum element size and returns the actual element size.
+ */
+ __u32 element_size;
+ /**
+ * @num_element:
+ *
+ * Specifies maximum number of elements and returns the actual number
+ * of elements.
+ */
+ __u32 num_element; /* in/out */
+ /** @pad: MBZ */
+ __u32 pad;
+ /**
+ * @buffer:
+ *
+ * Specifies the match conditions and returns the matched information
+ * array.
+ */
+ __u64 buffer;
+};
+
enum amdxdna_drm_set_param {
DRM_AMDXDNA_SET_POWER_MODE,
DRM_AMDXDNA_WRITE_AIE_MEM,
@@ -494,6 +626,10 @@ struct amdxdna_drm_set_power_mode {
DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_SET_STATE, \
struct amdxdna_drm_set_state)
+#define DRM_IOCTL_AMDXDNA_GET_ARRAY \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDXDNA_GET_ARRAY, \
+ struct amdxdna_drm_get_array)
+
#if defined(__cplusplus)
} /* extern c end */
#endif
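
A userspace sketch of the new array query, fetching every hardware context (dump_hwctx and the fixed 64-entry buffer are assumptions):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include "amdxdna_accel.h"

    static void dump_hwctx(int fd)
    {
    	struct amdxdna_drm_hwctx_entry entries[64];
    	struct amdxdna_drm_get_array args = {
    		.param = DRM_AMDXDNA_HW_CONTEXT_ALL,
    		.element_size = sizeof(entries[0]),
    		.num_element = 64,
    		.buffer = (uintptr_t)entries,
    	};

    	if (ioctl(fd, DRM_IOCTL_AMDXDNA_GET_ARRAY, &args))
    		return;

    	/* num_element now holds the actual number of returned entries */
    	for (uint32_t i = 0; i < args.num_element && i < 64; i++)
    		printf("ctx %u: state %u, %llu submissions\n",
    		       entries[i].context_id, entries[i].state,
    		       (unsigned long long)entries[i].command_submissions);
    }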
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index e63a71d3c607..3cd5cf15e3c9 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -597,35 +597,66 @@ struct drm_set_version {
int drm_dd_minor;
};
-/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/**
+ * struct drm_gem_close - Argument for &DRM_IOCTL_GEM_CLOSE ioctl.
+ * @handle: Handle of the object to be closed.
+ * @pad: Padding.
+ *
+ * Releases the handle to an mm object.
+ */
struct drm_gem_close {
- /** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/* DRM_IOCTL_GEM_FLINK ioctl argument type */
+/**
+ * struct drm_gem_flink - Argument for &DRM_IOCTL_GEM_FLINK ioctl.
+ * @handle: Handle for the object being named.
+ * @name: Returned global name.
+ *
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
struct drm_gem_flink {
- /** Handle for the object being named */
__u32 handle;
-
- /** Returned global name */
__u32 name;
};
-/* DRM_IOCTL_GEM_OPEN ioctl argument type */
+/**
+ * struct drm_gem_open - Argument for &DRM_IOCTL_GEM_OPEN ioctl.
+ * @name: Name of object being opened.
+ * @handle: Returned handle for the object.
+ * @size: Returned size of the object.
+ *
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
struct drm_gem_open {
- /** Name of object being opened */
__u32 name;
-
- /** Returned handle for the object */
__u32 handle;
-
- /** Returned size of the object */
__u64 size;
};
/**
+ * struct drm_gem_change_handle - Argument for &DRM_IOCTL_GEM_CHANGE_HANDLE ioctl.
+ * @handle: The handle of a gem object.
+ * @new_handle: An available gem handle.
+ *
+ * This ioctl changes the handle of a GEM object to the specified one.
+ * The new handle must be unused. On success, the old handle is closed
+ * and all further IOCTLs should refer to the new handle only.
+ * Calls to DRM_IOCTL_PRIME_FD_TO_HANDLE will return the new handle.
+ */
+struct drm_gem_change_handle {
+ __u32 handle;
+ __u32 new_handle;
+};
+
+/**
* DRM_CAP_DUMB_BUFFER
*
* If set to 1, the driver supports creating dumb buffers via the
@@ -1309,6 +1340,14 @@ extern "C" {
*/
#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name)
+/**
+ * DRM_IOCTL_GEM_CHANGE_HANDLE - Move an object to a different handle
+ *
+ * Some applications (notably CRIU) need objects to have specific gem handles.
+ * This ioctl changes the object at one gem handle to use a new gem handle.
+ */
+#define DRM_IOCTL_GEM_CHANGE_HANDLE DRM_IOWR(0xD2, struct drm_gem_change_handle)
+
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
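
A sketch of the CRIU-style use case the new ioctl is meant for; restore_handle and its arguments are assumptions:

    #include <sys/ioctl.h>
    #include "drm.h"

    static int restore_handle(int fd, __u32 current_handle, __u32 wanted_handle)
    {
    	struct drm_gem_change_handle args = {
    		.handle = current_handle,
    		.new_handle = wanted_handle,
    	};

    	/* fails if wanted_handle is in use; on success current_handle
    	 * is closed and only wanted_handle remains valid */
    	return ioctl(fd, DRM_IOCTL_GEM_CHANGE_HANDLE, &args);
    }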
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index c082810c08a8..a122bea25593 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -962,6 +962,14 @@ struct hdr_output_metadata {
* Request that the kernel sends back a vblank event (see
* struct drm_event_vblank) with the &DRM_EVENT_FLIP_COMPLETE type when the
* page-flip is done.
+ *
+ * When used with atomic uAPI, one event will be delivered per CRTC included in
+ * the atomic commit. A CRTC is included in an atomic commit if one of its
+ * properties is set, or if a property is set on a connector or plane linked
+ * via the CRTC_ID property to the CRTC. At least one CRTC must be included,
+ * and all pulled-in CRTCs must be either previously or newly powered on (in
+ * other words, a powered-off CRTC which stays off cannot be included in the
+ * atomic commit).
*/
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
/**
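
A libdrm sketch of the per-CRTC event rule documented above: committing a plane property pulls in the CRTC the plane is linked to, and that CRTC receives one flip-complete event. The function name and property IDs are assumptions:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int foo_flip(int fd, drmModeAtomicReq *req, uint32_t plane_id,
    		    uint32_t fb_prop_id, uint32_t fb_id)
    {
    	/* setting a plane property pulls its CRTC into the commit */
    	drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id);

    	/* one drm_event_vblank of type DRM_EVENT_FLIP_COMPLETE will be
    	 * delivered for that CRTC */
    	return drmModeAtomicCommit(fd, req,
    				   DRM_MODE_PAGE_FLIP_EVENT |
    				   DRM_MODE_ATOMIC_NONBLOCK, NULL);
    }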
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index e1f43deb7eca..467d365ed7ba 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -327,6 +327,9 @@ struct drm_panthor_gpu_info {
/** @pad: MBZ. */
__u32 pad;
+
+ /** @gpu_features: Bitmask describing supported GPU-wide features */
+ __u64 gpu_features;
};
/**
diff --git a/include/uapi/drm/rocket_accel.h b/include/uapi/drm/rocket_accel.h
new file mode 100644
index 000000000000..14b2e12b7c49
--- /dev/null
+++ b/include/uapi/drm/rocket_accel.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Tomeu Vizoso
+ */
+#ifndef __DRM_UAPI_ROCKET_ACCEL_H__
+#define __DRM_UAPI_ROCKET_ACCEL_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_ROCKET_CREATE_BO 0x00
+#define DRM_ROCKET_SUBMIT 0x01
+#define DRM_ROCKET_PREP_BO 0x02
+#define DRM_ROCKET_FINI_BO 0x03
+
+#define DRM_IOCTL_ROCKET_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_ROCKET_CREATE_BO, struct drm_rocket_create_bo)
+#define DRM_IOCTL_ROCKET_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_SUBMIT, struct drm_rocket_submit)
+#define DRM_IOCTL_ROCKET_PREP_BO DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_PREP_BO, struct drm_rocket_prep_bo)
+#define DRM_IOCTL_ROCKET_FINI_BO DRM_IOW(DRM_COMMAND_BASE + DRM_ROCKET_FINI_BO, struct drm_rocket_fini_bo)
+
+/**
+ * struct drm_rocket_create_bo - ioctl argument for creating Rocket BOs.
+ *
+ */
+struct drm_rocket_create_bo {
+ /** Input: Size of the requested BO. */
+ __u32 size;
+
+ /** Output: GEM handle for the BO. */
+ __u32 handle;
+
+ /**
+ * Output: DMA address for the BO in the NPU address space. This address
+ * is private to the DRM fd and is valid for the lifetime of the GEM
+ * handle.
+ */
+ __u64 dma_address;
+
+ /** Output: Offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
+
+/**
+ * struct drm_rocket_prep_bo - ioctl argument for starting CPU ownership of the BO.
+ *
+ * Takes care of waiting for any NPU jobs that might still use the NPU and performs cache
+ * synchronization.
+ */
+struct drm_rocket_prep_bo {
+ /** Input: GEM handle of the buffer object. */
+ __u32 handle;
+
+ /** Reserved, must be zero. */
+ __u32 reserved;
+
+ /** Input: Amount of time to wait for NPU jobs. */
+ __s64 timeout_ns;
+};
+
+/**
+ * struct drm_rocket_fini_bo - ioctl argument for finishing CPU ownership of the BO.
+ *
+ * Synchronize caches for NPU access.
+ */
+struct drm_rocket_fini_bo {
+ /** Input: GEM handle of the buffer object. */
+ __u32 handle;
+
+ /** Reserved, must be zero. */
+ __u32 reserved;
+};
+
+/**
+ * struct drm_rocket_task - A task to be run on the NPU
+ *
+ * A task is the smallest unit of work that can be run on the NPU.
+ */
+struct drm_rocket_task {
+ /** Input: DMA address to NPU mapping of register command buffer */
+ __u32 regcmd;
+
+ /** Input: Number of commands in the register command buffer */
+ __u32 regcmd_count;
+};
+
+/**
+ * struct drm_rocket_job - A job to be run on the NPU
+ *
+ * The kernel will schedule the execution of this job taking into account its
+ * dependencies with other jobs. All tasks in the same job will be executed
+ * sequentially on the same core, to benefit from memory residency in SRAM.
+ */
+struct drm_rocket_job {
+ /** Input: Pointer to an array of struct drm_rocket_task. */
+ __u64 tasks;
+
+ /** Input: Pointer to a u32 array of the BOs that are read by the job. */
+ __u64 in_bo_handles;
+
+ /** Input: Pointer to a u32 array of the BOs that are written to by the job. */
+ __u64 out_bo_handles;
+
+ /** Input: Number of tasks passed in. */
+ __u32 task_count;
+
+ /** Input: Size in bytes of the structs in the @tasks field. */
+ __u32 task_struct_size;
+
+ /** Input: Number of input BO handles passed in (size is that times 4). */
+ __u32 in_bo_handle_count;
+
+ /** Input: Number of output BO handles passed in (size is that times 4). */
+ __u32 out_bo_handle_count;
+};
+
+/**
+ * struct drm_rocket_submit - ioctl argument for submitting commands to the NPU.
+ *
+ * The kernel will schedule the execution of these jobs in dependency order.
+ */
+struct drm_rocket_submit {
+ /** Input: Pointer to an array of struct drm_rocket_job. */
+ __u64 jobs;
+
+ /** Input: Number of jobs passed in. */
+ __u32 job_count;
+
+ /** Input: Size in bytes of the structs in the @jobs field. */
+ __u32 job_struct_size;
+
+ /** Reserved, must be zero. */
+ __u64 reserved;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __DRM_UAPI_ROCKET_ACCEL_H__ */
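
A userspace sketch of the prep/fini CPU-ownership bracket described in the header; with_cpu_access and the 1 s timeout are assumptions:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "rocket_accel.h"

    static int with_cpu_access(int fd, uint32_t handle, void (*touch)(void))
    {
    	struct drm_rocket_prep_bo prep = {
    		.handle = handle,
    		.timeout_ns = 1000000000,	/* wait up to 1 s for NPU jobs */
    	};
    	struct drm_rocket_fini_bo fini = { .handle = handle };

    	if (ioctl(fd, DRM_IOCTL_ROCKET_PREP_BO, &prep))
    		return -1;

    	touch();	/* CPU reads/writes the mapped BO here */

    	/* hand ownership back to the NPU, syncing caches */
    	return ioctl(fd, DRM_IOCTL_ROCKET_FINI_BO, &fini);
    }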
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index dbbc404d2b3d..d9b01f4c3a04 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -294,6 +294,8 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
DRM_V3D_PARAM_MAX_PERF_COUNTERS,
DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES,
+ DRM_V3D_PARAM_GLOBAL_RESET_COUNTER,
+ DRM_V3D_PARAM_CONTEXT_RESET_COUNTER,
};
struct drm_v3d_get_param {
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index e2426413488f..40ff19f52a8d 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,6 +104,8 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -117,6 +121,8 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -760,7 +766,11 @@ struct drm_xe_device_query {
* gem creation
*
* The @flags can be:
- * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
+ * allocation strategy by deferring physical memory allocation
+ * until the object is either bound to a virtual memory region via
+ * VM_BIND or accessed by the CPU. As a result, no backing memory is
+ * reserved at the time of GEM object creation.
* - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
* - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
* possible placement, ensure that the corresponding VRAM allocation
@@ -1003,6 +1013,10 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ *
+ * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
+ * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
+ * the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1108,6 +1122,7 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
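/*
 * Illustrative sketch (not part of the patch): a prefetch bind operation that
 * consults the location previously advised via DRM_IOCTL_XE_MADVISE. Other
 * drm_xe_vm_bind_op fields are left at their defaults for brevity.
 */
struct drm_xe_vm_bind_op bind_op = {
	.op = DRM_XE_VM_BIND_OP_PREFETCH,
	.addr = 0x100000,
	.range = 0x2000,
	.prefetch_mem_region_instance = DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC,
};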
@@ -1974,6 +1989,271 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * This structure is used to set memory attributes for a virtual address range
+ * in a VM. The type of attribute is specified by @type, and the corresponding
+ * union member is used to provide additional parameters for @type.
+ *
+ * Supported attribute types:
+ * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
+ * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
+ * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_madvise madvise = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ * .atomic_val = DRM_XE_ATOMIC_DEVICE,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
+ *
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
+#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
+#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+ /** @type: type of attribute */
+ __u32 type;
+
+ union {
+ /**
+ * @preferred_mem_loc: preferred memory location
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
+ *
+ * Supported values for @preferred_mem_loc.devmem_fd:
+	 * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: use the VRAM of the faulting
+	 *   tile as the preferred location
+	 * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: use system memory as the
+	 *   preferred location
+ *
+ * Supported values for @preferred_mem_loc.migration_policy:
+ * - DRM_XE_MIGRATE_ALL_PAGES
+ * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
+ */
+ struct {
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
+#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+#define DRM_XE_MIGRATE_ALL_PAGES 0
+#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u16 migration_policy;
+
+		/** @preferred_mem_loc.pad: MBZ */
+ __u16 pad;
+
+		/** @preferred_mem_loc.reserved: Reserved */
+ __u64 reserved;
+ } preferred_mem_loc;
+
+ /**
+ * @atomic: Atomic access policy
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
+ *
+ * Supported values for @atomic.val:
+	 * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
+	 *   Supports both GPU and CPU atomic operations for the system allocator.
+	 *   Supports GPU atomic operations for the normal (BO) allocator.
+ * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations.
+ * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations.
+ * - DRM_XE_ATOMIC_CPU: Support CPU atomic only, no GPU atomics supported.
+ */
+ struct {
+#define DRM_XE_ATOMIC_UNDEFINED 0
+#define DRM_XE_ATOMIC_DEVICE 1
+#define DRM_XE_ATOMIC_GLOBAL 2
+#define DRM_XE_ATOMIC_CPU 3
+ /** @atomic.val: value of atomic operation */
+ __u32 val;
+
+ /** @atomic.pad: MBZ */
+ __u32 pad;
+
+ /** @atomic.reserved: Reserved */
+ __u64 reserved;
+ } atomic;
+
+ /**
+ * @pat_index: Page attribute table index
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
+ */
+ struct {
+ /** @pat_index.val: PAT index value */
+ __u32 val;
+
+ /** @pat_index.pad: MBZ */
+ __u32 pad;
+
+ /** @pat_index.reserved: Reserved */
+ __u64 reserved;
+ } pat_index;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
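/*
 * Illustrative sketch (not part of the patch): advising a preferred location
 * of system memory for a range, migrating all pages. Mirrors the atomic
 * example in the kernel-doc above.
 */
struct drm_xe_madvise madvise = {
	.vm_id = vm_id,
	.start = 0x100000,
	.range = 0x2000,
	.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
	.preferred_mem_loc = {
		.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
		.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
	},
};

ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);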
+
+/**
+ * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is provided by userspace and filled by the KMD in response to
+ * the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes the memory
+ * attributes of a memory range within a user-specified address range in a VM.
+ *
+ * The structure includes information such as the atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ * Userspace allocates an array of these structures and passes a pointer to the
+ * ioctl to retrieve the attributes for each memory range.
+ */
+struct drm_xe_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the memory range */
+ __u64 start;
+
+ /** @end: end of the memory range */
+ __u64 end;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+
+ /** @atomic: Atomic access policy */
+ struct {
+ /** @atomic.val: atomic attribute */
+ __u32 val;
+
+ /** @atomic.reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ /** @pat_index: Page attribute table index */
+ struct {
+ /** @pat_index.val: PAT index */
+ __u32 val;
+
+ /** @pat_index.reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is used to query the memory attributes of memory ranges
+ * within a user-specified address range in a VM. It provides detailed
+ * information about each memory range, including the atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ *
+ * Userspace first calls the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
+ * the number of memory ranges and the size of each memory range attribute.
+ * Then, it allocates a buffer of that size and calls the ioctl again to fill
+ * the buffer with memory range attributes.
+ *
+ * If the second call fails with -ENOSPC, the memory ranges changed between
+ * the two calls; retry with @num_mem_ranges = 0, @sizeof_mem_range_attr = 0
+ * and @vector_of_mem_attr = NULL, and then repeat the second ioctl call.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_vm_query_mem_range_attr query = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * };
+ *
+ * // First ioctl call to get num of mem regions and sizeof each attribute
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Allocate buffer for the memory region attributes
+ *     char *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ *     char *ptr_start = ptr;
+ *
+ * query.vector_of_mem_attr = (uintptr_t)ptr;
+ *
+ * // Second ioctl call to actually fill the memory attributes
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ * // Iterate over the returned memory region attributes
+ * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
+ *
+ * // Do something with attr
+ *
+ * // Move pointer by one entry
+ * ptr += query.sizeof_mem_range_attr;
+ * }
+ *
+ * free(ptr_start);
+ */
+struct drm_xe_vm_query_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+ /** @num_mem_ranges: number of mem_ranges in range */
+ __u32 num_mem_ranges;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
+ __u64 sizeof_mem_range_attr;
+
+ /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
+ __u64 vector_of_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/aspeed-video.h b/include/uapi/linux/aspeed-video.h
index 6586a65548c4..15168e8c931e 100644
--- a/include/uapi/linux/aspeed-video.h
+++ b/include/uapi/linux/aspeed-video.h
@@ -8,6 +8,13 @@
#include <linux/v4l2-controls.h>
+/* Aspeed video input types */
+enum aspeed_video_input {
+ VIDEO_INPUT_VGA = 0,
+ VIDEO_INPUT_GFX,
+ VIDEO_INPUT_MAX
+};
+
#define V4L2_CID_ASPEED_HQ_MODE (V4L2_CID_USER_ASPEED_BASE + 1)
#define V4L2_CID_ASPEED_HQ_JPEG_QUALITY (V4L2_CID_USER_ASPEED_BASE + 2)
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 02ec32d69474..ef62f56eaaef 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -101,8 +101,8 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
#define CAN_CTRLMODE_CC_LEN8_DLC 0x100 /* Classic CAN DLC option */
-#define CAN_CTRLMODE_TDC_AUTO 0x200 /* CAN transiver automatically calculates TDCV */
-#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* TDCV is manually set up by user */
+#define CAN_CTRLMODE_TDC_AUTO 0x200 /* FD transceiver automatically calculates TDCV */
+#define CAN_CTRLMODE_TDC_MANUAL 0x400 /* FD TDCV is manually set up by user */
/*
* CAN device statistics
@@ -129,14 +129,14 @@ enum {
IFLA_CAN_RESTART_MS,
IFLA_CAN_RESTART,
IFLA_CAN_BERR_COUNTER,
- IFLA_CAN_DATA_BITTIMING,
- IFLA_CAN_DATA_BITTIMING_CONST,
+ IFLA_CAN_DATA_BITTIMING, /* FD */
+ IFLA_CAN_DATA_BITTIMING_CONST, /* FD */
IFLA_CAN_TERMINATION,
IFLA_CAN_TERMINATION_CONST,
IFLA_CAN_BITRATE_CONST,
- IFLA_CAN_DATA_BITRATE_CONST,
+ IFLA_CAN_DATA_BITRATE_CONST, /* FD */
IFLA_CAN_BITRATE_MAX,
- IFLA_CAN_TDC,
+ IFLA_CAN_TDC, /* FD */
IFLA_CAN_CTRLMODE_EXT,
/* add new constants above here */
@@ -145,7 +145,7 @@ enum {
};
/*
- * CAN FD Transmitter Delay Compensation (TDC)
+ * CAN FD/XL Transmitter Delay Compensation (TDC)
*
* Please refer to struct can_tdc_const and can_tdc in
* include/linux/can/bittiming.h for further details.
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 9fcb25a0f447..bcad11a787a5 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -636,6 +636,8 @@ enum devlink_attr {
DEVLINK_ATTR_RATE_TC_BWS, /* nested */
+ DEVLINK_ATTR_HEALTH_REPORTER_BURST_PERIOD, /* u64 */
+
/* Add new attributes above here, update the spec in
* Documentation/netlink/specs/devlink.yaml and re-generate
* net/devlink/netlink_gen.c.
diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h
index 37b438ce8efc..ab1725a954d7 100644
--- a/include/uapi/linux/dpll.h
+++ b/include/uapi/linux/dpll.h
@@ -216,6 +216,7 @@ enum dpll_a {
DPLL_A_LOCK_STATUS_ERROR,
DPLL_A_CLOCK_QUALITY_LEVEL,
DPLL_A_PHASE_OFFSET_MONITOR,
+ DPLL_A_PHASE_OFFSET_AVG_FACTOR,
__DPLL_A_MAX,
DPLL_A_MAX = (__DPLL_A_MAX - 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 9e9afdd1238a..8bd5ea5469d9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -2380,6 +2380,7 @@ enum {
#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */
+#define RXH_IP6_FL (1 << 9) /* IPv6 flow label */
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
diff --git a/include/uapi/linux/ethtool_netlink_generated.h b/include/uapi/linux/ethtool_netlink_generated.h
index e3b8813465d7..0e8ac0d974e2 100644
--- a/include/uapi/linux/ethtool_netlink_generated.h
+++ b/include/uapi/linux/ethtool_netlink_generated.h
@@ -562,11 +562,23 @@ enum {
};
enum {
+ ETHTOOL_A_FEC_HIST_PAD = 1,
+ ETHTOOL_A_FEC_HIST_BIN_LOW,
+ ETHTOOL_A_FEC_HIST_BIN_HIGH,
+ ETHTOOL_A_FEC_HIST_BIN_VAL,
+ ETHTOOL_A_FEC_HIST_BIN_VAL_PER_LANE,
+
+ __ETHTOOL_A_FEC_HIST_CNT,
+ ETHTOOL_A_FEC_HIST_MAX = (__ETHTOOL_A_FEC_HIST_CNT - 1)
+};
+
+enum {
ETHTOOL_A_FEC_STAT_UNSPEC,
ETHTOOL_A_FEC_STAT_PAD,
ETHTOOL_A_FEC_STAT_CORRECTED,
ETHTOOL_A_FEC_STAT_UNCORR,
ETHTOOL_A_FEC_STAT_CORR_BITS,
+ ETHTOOL_A_FEC_STAT_HIST,
__ETHTOOL_A_FEC_STAT_CNT,
ETHTOOL_A_FEC_STAT_MAX = (__ETHTOOL_A_FEC_STAT_CNT - 1)
diff --git a/include/uapi/linux/ext4.h b/include/uapi/linux/ext4.h
index 1c4c2dd29112..411dcc1e4a35 100644
--- a/include/uapi/linux/ext4.h
+++ b/include/uapi/linux/ext4.h
@@ -33,6 +33,8 @@
#define EXT4_IOC_CHECKPOINT _IOW('f', 43, __u32)
#define EXT4_IOC_GETFSUUID _IOR('f', 44, struct fsuuid)
#define EXT4_IOC_SETFSUUID _IOW('f', 44, struct fsuuid)
+#define EXT4_IOC_GET_TUNE_SB_PARAM _IOR('f', 45, struct ext4_tune_sb_params)
+#define EXT4_IOC_SET_TUNE_SB_PARAM _IOW('f', 46, struct ext4_tune_sb_params)
#define EXT4_IOC_SHUTDOWN _IOR('X', 125, __u32)
@@ -108,6 +110,57 @@ struct ext4_new_group_input {
__u16 unused;
};
+struct ext4_tune_sb_params {
+ __u32 set_flags;
+ __u32 checkinterval;
+ __u16 errors_behavior;
+ __u16 mnt_count;
+ __u16 max_mnt_count;
+ __u16 raid_stride;
+ __u64 last_check_time;
+ __u64 reserved_blocks;
+ __u64 blocks_count;
+ __u32 default_mnt_opts;
+ __u32 reserved_uid;
+ __u32 reserved_gid;
+ __u32 raid_stripe_width;
+ __u16 encoding;
+ __u16 encoding_flags;
+ __u8 def_hash_alg;
+ __u8 pad_1;
+ __u16 pad_2;
+ __u32 feature_compat;
+ __u32 feature_incompat;
+ __u32 feature_ro_compat;
+ __u32 set_feature_compat_mask;
+ __u32 set_feature_incompat_mask;
+ __u32 set_feature_ro_compat_mask;
+ __u32 clear_feature_compat_mask;
+ __u32 clear_feature_incompat_mask;
+ __u32 clear_feature_ro_compat_mask;
+ __u8 mount_opts[64];
+ __u8 pad[64];
+};
+
+#define EXT4_TUNE_FL_ERRORS_BEHAVIOR 0x00000001
+#define EXT4_TUNE_FL_MNT_COUNT 0x00000002
+#define EXT4_TUNE_FL_MAX_MNT_COUNT 0x00000004
+#define EXT4_TUNE_FL_CHECKINTRVAL 0x00000008
+#define EXT4_TUNE_FL_LAST_CHECK_TIME 0x00000010
+#define EXT4_TUNE_FL_RESERVED_BLOCKS 0x00000020
+#define EXT4_TUNE_FL_RESERVED_UID 0x00000040
+#define EXT4_TUNE_FL_RESERVED_GID 0x00000080
+#define EXT4_TUNE_FL_DEFAULT_MNT_OPTS 0x00000100
+#define EXT4_TUNE_FL_DEF_HASH_ALG 0x00000200
+#define EXT4_TUNE_FL_RAID_STRIDE 0x00000400
+#define EXT4_TUNE_FL_RAID_STRIPE_WIDTH 0x00000800
+#define EXT4_TUNE_FL_MOUNT_OPTS 0x00001000
+#define EXT4_TUNE_FL_FEATURES 0x00002000
+#define EXT4_TUNE_FL_EDIT_FEATURES 0x00004000
+#define EXT4_TUNE_FL_FORCE_FSCK 0x00008000
+#define EXT4_TUNE_FL_ENCODING 0x00010000
+#define EXT4_TUNE_FL_ENCODING_FLAGS 0x00020000
+
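/*
 * Illustrative sketch (not part of the patch): raising the maximum mount
 * count before a filesystem check is forced. The fd is assumed to refer to
 * a file on the ext4 filesystem being tuned.
 */
struct ext4_tune_sb_params params = { 0 };

if (ioctl(fd, EXT4_IOC_GET_TUNE_SB_PARAM, &params) == 0) {
	params.set_flags = EXT4_TUNE_FL_MAX_MNT_COUNT;
	params.max_mnt_count = 50;
	ioctl(fd, EXT4_IOC_SET_TUNE_SB_PARAM, &params);
}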
/*
* Returned by EXT4_IOC_GET_ES_CACHE as an additional possible flag.
* It indicates that the entry in extent status cache is for a hole.
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 122d6586e8d4..c13e1f9a2f12 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -235,6 +235,11 @@
*
* 7.44
* - add FUSE_NOTIFY_INC_EPOCH
+ *
+ * 7.45
+ * - add FUSE_COPY_FILE_RANGE_64
+ * - add struct fuse_copy_file_range_out
+ * - add FUSE_NOTIFY_PRUNE
*/
#ifndef _LINUX_FUSE_H
@@ -270,7 +275,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 44
+#define FUSE_KERNEL_MINOR_VERSION 45
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -657,6 +662,7 @@ enum fuse_opcode {
FUSE_SYNCFS = 50,
FUSE_TMPFILE = 51,
FUSE_STATX = 52,
+ FUSE_COPY_FILE_RANGE_64 = 53,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -675,7 +681,7 @@ enum fuse_notify_code {
FUSE_NOTIFY_DELETE = 6,
FUSE_NOTIFY_RESEND = 7,
FUSE_NOTIFY_INC_EPOCH = 8,
- FUSE_NOTIFY_CODE_MAX,
+ FUSE_NOTIFY_PRUNE = 9,
};
/* The read buffer is required to be at least 8k, but may be much larger */
@@ -1114,6 +1120,12 @@ struct fuse_notify_retrieve_in {
uint64_t dummy4;
};
+struct fuse_notify_prune_out {
+ uint32_t count;
+ uint32_t padding;
+ uint64_t spare;
+};
+
struct fuse_backing_map {
int32_t fd;
uint32_t flags;
@@ -1126,6 +1138,7 @@ struct fuse_backing_map {
#define FUSE_DEV_IOC_BACKING_OPEN _IOW(FUSE_DEV_IOC_MAGIC, 1, \
struct fuse_backing_map)
#define FUSE_DEV_IOC_BACKING_CLOSE _IOW(FUSE_DEV_IOC_MAGIC, 2, uint32_t)
+#define FUSE_DEV_IOC_SYNC_INIT _IO(FUSE_DEV_IOC_MAGIC, 3)
struct fuse_lseek_in {
uint64_t fh;
@@ -1148,6 +1161,11 @@ struct fuse_copy_file_range_in {
uint64_t flags;
};
+/* For FUSE_COPY_FILE_RANGE_64 */
+struct fuse_copy_file_range_out {
+ uint64_t bytes_copied;
+};
+
#define FUSE_SETUPMAPPING_FLAG_WRITE (1ull << 0)
#define FUSE_SETUPMAPPING_FLAG_READ (1ull << 1)
struct fuse_setupmapping_in {
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 73876c0e2bba..e52f8207ab27 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -823,6 +823,8 @@ struct br_mcast_stats {
/* bridge boolean options
* BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
* BR_BOOLOPT_MCAST_VLAN_SNOOPING - control vlan multicast snooping
+ * BR_BOOLOPT_FDB_LOCAL_VLAN_0 - local FDB entries installed by the bridge
+ * driver itself should only be added on VLAN 0
*
* IMPORTANT: if adding a new option do not forget to handle
* it in br_boolopt_toggle/get and bridge sysfs
@@ -832,6 +834,7 @@ enum br_boolopt_id {
BR_BOOLOPT_MCAST_VLAN_SNOOPING,
BR_BOOLOPT_MST_ENABLE,
BR_BOOLOPT_MDB_OFFLOAD_FAIL_NOTIFICATION,
+ BR_BOOLOPT_FDB_LOCAL_VLAN_0,
BR_BOOLOPT_MAX
};
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 784ace3a519c..3b491d96e52e 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -379,6 +379,8 @@ enum {
IFLA_DPLL_PIN,
IFLA_MAX_PACING_OFFLOAD_HORIZON,
IFLA_NETNS_IMMUTABLE,
+ IFLA_HEADROOM,
+ IFLA_TAILROOM,
__IFLA_MAX
};
@@ -1564,6 +1566,7 @@ enum {
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
IFLA_BOND_SLAVE_PRIO,
+ IFLA_BOND_SLAVE_ACTOR_PORT_PRIO,
__IFLA_BOND_SLAVE_MAX,
};
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 6957dc539d83..a0cc1cc0dd01 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -225,6 +225,12 @@ enum io_uring_sqe_flags_bit {
/* Use hybrid poll in iopoll process */
#define IORING_SETUP_HYBRID_IOPOLL (1U << 17)
+/*
+ * Allow both 16b and 32b CQEs. If a 32b CQE is posted, it will have
+ * IORING_CQE_F_32 set in cqe->flags.
+ */
+#define IORING_SETUP_CQE_MIXED (1U << 18)
+
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -298,9 +304,13 @@ enum io_uring_op {
* sqe->uring_cmd_flags top 8bits aren't available for userspace
* IORING_URING_CMD_FIXED use registered buffer; pass this flag
* along with setting sqe->buf_index.
+ * IORING_URING_CMD_MULTISHOT must be used with buffer select, like other
+ * multishot commands. Not compatible with
+ * IORING_URING_CMD_FIXED, for now.
*/
#define IORING_URING_CMD_FIXED (1U << 0)
-#define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED
+#define IORING_URING_CMD_MULTISHOT (1U << 1)
+#define IORING_URING_CMD_MASK (IORING_URING_CMD_FIXED | IORING_URING_CMD_MULTISHOT)
/*
@@ -454,6 +464,7 @@ enum io_uring_msg_ring_flags {
#define IORING_NOP_FIXED_FILE (1U << 2)
#define IORING_NOP_FIXED_BUFFER (1U << 3)
#define IORING_NOP_TW (1U << 4)
+#define IORING_NOP_CQE32 (1U << 5)
/*
* IO completion data structure (Completion Queue Entry)
@@ -487,12 +498,22 @@ struct io_uring_cqe {
* other provided buffer type, all completions with a
* buffer passed back is automatically returned to the
* application.
+ * IORING_CQE_F_SKIP	If set, then the application/liburing must ignore this
+ *			CQE. Its only purpose is to fill a gap in the ring,
+ *			when posting a large CQE is attempted while the ring
+ *			has just a single small CQE worth of space left before
+ *			wrapping.
+ * IORING_CQE_F_32 If set, this is a 32b/big-cqe posting. Use with rings
+ * setup in a mixed CQE mode, where both 16b and 32b
+ * CQEs may be posted to the CQ ring.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
#define IORING_CQE_F_NOTIF (1U << 3)
#define IORING_CQE_F_BUF_MORE (1U << 4)
+#define IORING_CQE_F_SKIP (1U << 5)
+#define IORING_CQE_F_32 (1U << 15)
#define IORING_CQE_BUFFER_SHIFT 16
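/*
 * Illustrative sketch (not part of the patch): draining the CQ ring of a
 * ring created with IORING_SETUP_CQE_MIXED. Memory barriers and the ring
 * accessors (cq_head, cq_tail, cq_mask, cqes) plus handle_cqe() are assumed.
 */
unsigned head = *cq_head;

while (head != *cq_tail) {
	struct io_uring_cqe *cqe = &cqes[head & cq_mask];

	if (!(cqe->flags & IORING_CQE_F_SKIP))
		handle_cqe(cqe);
	/* a big CQE consumes two small CQE slots */
	head += (cqe->flags & IORING_CQE_F_32) ? 2 : 1;
}
*cq_head = head;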
@@ -665,6 +686,12 @@ enum io_uring_register_op {
IORING_REGISTER_MEM_REGION = 34,
+ /* query various aspects of io_uring, see linux/io_uring/query.h */
+ IORING_REGISTER_QUERY = 35,
+
+ /* return zcrx buffers back into circulation */
+ IORING_REGISTER_ZCRX_REFILL = 36,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -1046,6 +1073,15 @@ struct io_uring_zcrx_ifq_reg {
__u64 __resv[3];
};
+struct io_uring_zcrx_sync_refill {
+ __u32 zcrx_id;
+ /* the number of entries to return */
+ __u32 nr_entries;
+ /* pointer to an array of struct io_uring_zcrx_rqe */
+ __u64 rqes;
+ __u64 __resv[2];
+};
+
#ifdef __cplusplus
}
#endif
diff --git a/include/uapi/linux/io_uring/query.h b/include/uapi/linux/io_uring/query.h
new file mode 100644
index 000000000000..5d754322a27c
--- /dev/null
+++ b/include/uapi/linux/io_uring/query.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Header file for the io_uring query interface.
+ */
+#ifndef LINUX_IO_URING_QUERY_H
+#define LINUX_IO_URING_QUERY_H
+
+#include <linux/types.h>
+
+struct io_uring_query_hdr {
+ __u64 next_entry;
+ __u64 query_data;
+ __u32 query_op;
+ __u32 size;
+ __s32 result;
+ __u32 __resv[3];
+};
+
+enum {
+ IO_URING_QUERY_OPCODES = 0,
+
+ __IO_URING_QUERY_MAX,
+};
+
+/* Doesn't require a ring */
+struct io_uring_query_opcode {
+ /* The number of supported IORING_OP_* opcodes */
+ __u32 nr_request_opcodes;
+ /* The number of supported IORING_[UN]REGISTER_* opcodes */
+ __u32 nr_register_opcodes;
+ /* Bitmask of all supported IORING_FEAT_* flags */
+ __u64 feature_flags;
+ /* Bitmask of all supported IORING_SETUP_* flags */
+ __u64 ring_setup_flags;
+ /* Bitmask of all supported IORING_ENTER_** flags */
+ __u64 enter_flags;
+ /* Bitmask of all supported IOSQE_* flags */
+ __u64 sqe_flags;
+};
+
+#endif
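/*
 * Illustrative sketch (not part of the patch): querying supported opcodes.
 * The exact io_uring_register() calling convention for IORING_REGISTER_QUERY
 * (fd and nr_args handling) is an assumption here.
 */
struct io_uring_query_opcode op = { 0 };
struct io_uring_query_hdr hdr = {
	.query_op = IO_URING_QUERY_OPCODES,
	.query_data = (__u64)(uintptr_t)&op,
	.size = sizeof(op),
};

syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_QUERY, &hdr, 0);
if (hdr.result == 0)
	printf("%u request opcodes supported\n", op.nr_request_opcodes);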
diff --git a/include/uapi/linux/ivtv.h b/include/uapi/linux/ivtv.h
index e74f18642b11..c9241f7271c4 100644
--- a/include/uapi/linux/ivtv.h
+++ b/include/uapi/linux/ivtv.h
@@ -2,7 +2,7 @@
/*
Public ivtv API header
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 8958ebfcff94..55749cb0b81d 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -22,12 +22,16 @@
* KEXEC_FILE_ON_CRASH : Load/unload operation belongs to kdump image.
* KEXEC_FILE_NO_INITRAMFS : No initramfs is being loaded. Ignore the initrd
* fd field.
+ * KEXEC_FILE_FORCE_DTB : Force carrying over the current boot's DTB to the new
+ * kernel on x86. This is already the default behavior on
+ * some other architectures, like ARM64 and PowerPC.
*/
#define KEXEC_FILE_UNLOAD 0x00000001
#define KEXEC_FILE_ON_CRASH 0x00000002
#define KEXEC_FILE_NO_INITRAMFS 0x00000004
#define KEXEC_FILE_DEBUG 0x00000008
#define KEXEC_FILE_NO_CMA 0x00000010
+#define KEXEC_FILE_FORCE_DTB 0x00000020
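/*
 * Illustrative sketch (not part of the patch): loading a new kernel with
 * kexec_file_load() while forcing the current boot's DTB to be carried over.
 */
syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
	strlen(cmdline) + 1, cmdline, (unsigned long)KEXEC_FILE_FORCE_DTB);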
/* These values match the ELF architecture values.
* Unless there is a good reason that should continue to be the case.
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 1f9bb10d1a47..8fbbe613611a 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -66,10 +66,16 @@ enum {
#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
/*
- * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
- * ABI. New bits are OK, but existing bits can never change.
+ * Enabling zone reclaim means the page allocator will attempt to fulfill
+ * the allocation request on the current node by triggering reclaim and
+ * trying to shrink the current node.
+ * Fallback allocations on the next candidates in the zonelist are considered
+ * when reclaim fails to free up enough memory in the current node/zone.
+ *
+ * These bit locations are exposed in the vm.zone_reclaim_mode sysctl.
+ * New bits are OK, but existing bits should not be changed.
*/
-#define RECLAIM_ZONE (1<<0) /* Run shrink_inactive_list on the zone */
+#define RECLAIM_ZONE (1<<0) /* Enable zone reclaim */
#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */
#define RECLAIM_UNMAP (1<<2) /* Unmap pages during reclaim */
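/*
 * Illustrative sketch (not part of the patch): enabling zone reclaim with
 * unmapping by writing the OR of the bits (1 | 4 = 5) to the sysctl.
 */
FILE *f = fopen("/proc/sys/vm/zone_reclaim_mode", "w");

if (f) {
	fprintf(f, "%d\n", RECLAIM_ZONE | RECLAIM_UNMAP);
	fclose(f);
}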
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index 5fd5b4cf75ca..87cfab874e24 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -32,20 +32,27 @@
#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0)
+#define MPTCP_PM_EV_FLAG_SERVER_SIDE _BITUL(1)
-#define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
-#define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
-#define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
-#define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3)
-#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4)
+#define MPTCP_PM_ADDR_FLAG_SIGNAL _BITUL(0)
+#define MPTCP_PM_ADDR_FLAG_SUBFLOW _BITUL(1)
+#define MPTCP_PM_ADDR_FLAG_BACKUP _BITUL(2)
+#define MPTCP_PM_ADDR_FLAG_FULLMESH _BITUL(3)
+#define MPTCP_PM_ADDR_FLAG_IMPLICIT _BITUL(4)
+#define MPTCP_PM_ADDR_FLAG_LAMINAR _BITUL(5)
struct mptcp_info {
__u8 mptcpi_subflows;
+ #define mptcpi_extra_subflows mptcpi_subflows
__u8 mptcpi_add_addr_signal;
__u8 mptcpi_add_addr_accepted;
__u8 mptcpi_subflows_max;
+ #define mptcpi_limit_extra_subflows mptcpi_subflows_max
__u8 mptcpi_add_addr_signal_max;
+ #define mptcpi_endp_signal_max mptcpi_add_addr_signal_max
__u8 mptcpi_add_addr_accepted_max;
+ #define mptcpi_limit_add_addr_accepted mptcpi_add_addr_accepted_max
+ /* 16-bit hole that can no longer be filled */
__u32 mptcpi_flags;
__u32 mptcpi_token;
__u64 mptcpi_write_seq;
@@ -53,14 +60,17 @@ struct mptcp_info {
__u64 mptcpi_rcv_nxt;
__u8 mptcpi_local_addr_used;
__u8 mptcpi_local_addr_max;
+ #define mptcpi_endp_subflow_max mptcpi_local_addr_max
__u8 mptcpi_csum_enabled;
+ /* 8-bit hole that can no longer be filled */
__u32 mptcpi_retransmits;
__u64 mptcpi_bytes_retrans;
__u64 mptcpi_bytes_sent;
__u64 mptcpi_bytes_received;
__u64 mptcpi_bytes_acked;
__u8 mptcpi_subflows_total;
- __u8 reserved[3];
+ __u8 mptcpi_endp_laminar_max;
+ __u8 reserved[2];
__u32 mptcpi_last_data_sent;
__u32 mptcpi_last_data_recv;
__u32 mptcpi_last_ack_recv;
diff --git a/include/uapi/linux/mptcp_pm.h b/include/uapi/linux/mptcp_pm.h
index 7359d34da446..bf44a5cf5b5a 100644
--- a/include/uapi/linux/mptcp_pm.h
+++ b/include/uapi/linux/mptcp_pm.h
@@ -16,10 +16,10 @@
* good time to allocate memory and send ADD_ADDR if needed. Depending on the
* traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
* is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport, server-side, [flags].
+ * sport, dport, [server-side], [flags].
* @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new
* subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport, server-side, [flags].
+ * sport, dport, [server-side], [flags].
* @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token.
* @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
* Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 8e0eb832bc01..7c0c915f0306 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -959,6 +959,7 @@ enum nft_exthdr_attributes {
* @NFT_META_SDIF: slave device interface index
* @NFT_META_SDIFNAME: slave device interface name
* @NFT_META_BRI_BROUTE: packet br_netfilter_broute bit
+ * @NFT_META_BRI_IIFHWADDR: packet input bridge interface ethernet address
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -999,6 +1000,7 @@ enum nft_meta_keys {
NFT_META_SDIFNAME,
NFT_META_BRI_BROUTE,
__NFT_META_IIFTYPE,
+ NFT_META_BRI_IIFHWADDR,
};
/**
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index d1a14f2892d9..8134f10e4e6c 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1085,8 +1085,9 @@
* %NL80211_ATTR_NAN_MASTER_PREF attribute and optional
* %NL80211_ATTR_BANDS attributes. If %NL80211_ATTR_BANDS is
* omitted or set to 0, it means don't-care and the device will
- * decide what to use. After this command NAN functions can be
- * added.
+ *	decide what to use. Additional cluster configuration may optionally
+ *	be provided with %NL80211_ATTR_NAN_CONFIG.
+ * After this command NAN functions can be added.
* @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
* its %NL80211_ATTR_WDEV interface.
* @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
@@ -1115,6 +1116,10 @@
* current configuration is not changed. If it is present but
* set to zero, the configuration is changed to don't-care
* (i.e. the device can decide what to do).
+ * Additional parameters may be provided with
+ * %NL80211_ATTR_NAN_CONFIG. User space should provide all previously
+ * configured nested attributes under %NL80211_ATTR_NAN_CONFIG, even if
+ * only a subset was changed.
* @NL80211_CMD_NAN_MATCH: Notification sent when a match is reported.
* This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
* %NL80211_ATTR_COOKIE.
@@ -1344,6 +1349,18 @@
* control EPCS configuration. Used to notify userland on the current state
* of EPCS.
*
+ * @NL80211_CMD_NAN_NEXT_DW_NOTIFICATION: This command is used to notify
+ * user space about the next NAN Discovery Window (DW). User space may use
+ * it to prepare frames to be sent in the next DW.
+ * %NL80211_ATTR_WIPHY_FREQ is used to indicate the frequency of the next
+ * DW. SDF transmission should be requested with %NL80211_CMD_FRAME and
+ * the device/driver shall take care of the actual transmission timing.
+ * This notification is only sent to the NAN interface owning socket
+ * (see %NL80211_ATTR_SOCKET_OWNER flag).
+ * @NL80211_CMD_NAN_CLUSTER_JOINED: This command is used to notify
+ *	user space that a new NAN cluster has been joined. The cluster ID is
+ * indicated by %NL80211_ATTR_MAC.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1604,6 +1621,9 @@ enum nl80211_commands {
NL80211_CMD_ASSOC_MLO_RECONF,
NL80211_CMD_EPCS_CFG,
+ NL80211_CMD_NAN_NEXT_DW_NOTIFICATION,
+ NL80211_CMD_NAN_CLUSTER_JOINED,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1943,8 +1963,9 @@ enum nl80211_commands {
* The driver must also specify support for this with the extended
* features NL80211_EXT_FEATURE_BEACON_RATE_LEGACY,
* NL80211_EXT_FEATURE_BEACON_RATE_HT,
- * NL80211_EXT_FEATURE_BEACON_RATE_VHT and
- * NL80211_EXT_FEATURE_BEACON_RATE_HE.
+ * NL80211_EXT_FEATURE_BEACON_RATE_VHT,
+ * NL80211_EXT_FEATURE_BEACON_RATE_HE and
+ * NL80211_EXT_FEATURE_BEACON_RATE_EHT.
*
* @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain
* at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME.
@@ -2283,7 +2304,8 @@ enum nl80211_commands {
* @NL80211_ATTR_PEER_AID: Association ID for the peer TDLS station (u16).
* This is similar to @NL80211_ATTR_STA_AID but with a difference of being
* allowed to be used with the first @NL80211_CMD_SET_STATION command to
- * update a TDLS peer STA entry.
+ * update a TDLS peer STA entry. For S1G interfaces, this is limited to
+ * 1600 for the current mac80211 implementation.
*
* @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
*
@@ -2928,6 +2950,29 @@ enum nl80211_commands {
* required alongside this attribute. Refer to
* @enum nl80211_s1g_short_beacon_attrs for the attribute definitions.
*
+ * @NL80211_ATTR_BSS_PARAM: nested attribute used with %NL80211_CMD_GET_WIPHY
+ * which indicates which BSS parameters can be modified. The attribute can
+ * also be used as flag attribute by user-space in %NL80211_CMD_SET_BSS to
+ * indicate that it wants strict checking on the BSS parameters to be
+ * modified.
+ *
+ * @NL80211_ATTR_NAN_CONFIG: Nested attribute for
+ * extended NAN cluster configuration. This is used with
+ * %NL80211_CMD_START_NAN and %NL80211_CMD_CHANGE_NAN_CONFIG.
+ * See &enum nl80211_nan_conf_attributes for details.
+ * This attribute is optional.
+ * @NL80211_ATTR_NAN_NEW_CLUSTER: Flag attribute indicating that a new
+ * NAN cluster has been created. This is used with
+ * %NL80211_CMD_NAN_CLUSTER_JOINED
+ * @NL80211_ATTR_NAN_CAPABILITIES: Nested attribute for NAN capabilities.
+ * This is used with %NL80211_CMD_GET_WIPHY to indicate the NAN
+ * capabilities supported by the driver. See &enum nl80211_nan_capabilities
+ * for details.
+ *
+ * @NL80211_ATTR_S1G_PRIMARY_2MHZ: flag attribute indicating that the S1G
+ * primary channel is 2 MHz wide, and the control channel designates
+ * the 1 MHz primary subchannel within that 2 MHz primary.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3489,6 +3534,12 @@ enum nl80211_attrs {
NL80211_ATTR_S1G_LONG_BEACON_PERIOD,
NL80211_ATTR_S1G_SHORT_BEACON,
+ NL80211_ATTR_BSS_PARAM,
+ NL80211_ATTR_NAN_CONFIG,
+ NL80211_ATTR_NAN_NEW_CLUSTER,
+ NL80211_ATTR_NAN_CAPABILITIES,
+
+ NL80211_ATTR_S1G_PRIMARY_2MHZ,
/* add attributes here, update the policy in nl80211.c */
@@ -3736,6 +3787,22 @@ enum nl80211_eht_gi {
};
/**
+ * enum nl80211_eht_ltf - EHT long training field
+ * @NL80211_RATE_INFO_EHT_1XLTF: 3.2 usec
+ * @NL80211_RATE_INFO_EHT_2XLTF: 6.4 usec
+ * @NL80211_RATE_INFO_EHT_4XLTF: 12.8 usec
+ * @NL80211_RATE_INFO_EHT_6XLTF: 19.2 usec
+ * @NL80211_RATE_INFO_EHT_8XLTF: 25.6 usec
+ */
+enum nl80211_eht_ltf {
+ NL80211_RATE_INFO_EHT_1XLTF,
+ NL80211_RATE_INFO_EHT_2XLTF,
+ NL80211_RATE_INFO_EHT_4XLTF,
+ NL80211_RATE_INFO_EHT_6XLTF,
+ NL80211_RATE_INFO_EHT_8XLTF,
+};
+
+/**
* enum nl80211_eht_ru_alloc - EHT RU allocation values
* @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation
* @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation
@@ -4371,6 +4438,12 @@ enum nl80211_wmm_rule {
* very low power (VLP) AP, despite being NO_IR.
* @NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY: This channel can be active in
* 20 MHz bandwidth, despite being NO_IR.
+ * @NL80211_FREQUENCY_ATTR_NO_4MHZ: 4 MHz operation is not allowed on this
+ * channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_8MHZ: 8 MHz operation is not allowed on this
+ * channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_NO_16MHZ: 16 MHz operation is not allowed on this
+ * channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -4416,6 +4489,9 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_CAN_MONITOR,
NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP,
NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY,
+ NL80211_FREQUENCY_ATTR_NO_4MHZ,
+ NL80211_FREQUENCY_ATTR_NO_8MHZ,
+ NL80211_FREQUENCY_ATTR_NO_16MHZ,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -5481,6 +5557,10 @@ enum nl80211_key_attributes {
* see &struct nl80211_txrate_he
* @NL80211_TXRATE_HE_GI: configure HE GI, 0.8us, 1.6us and 3.2us.
* @NL80211_TXRATE_HE_LTF: configure HE LTF, 1XLTF, 2XLTF and 4XLTF.
+ * @NL80211_TXRATE_EHT: EHT rates allowed for TX rate selection,
+ * see &struct nl80211_txrate_eht
+ * @NL80211_TXRATE_EHT_GI: configure EHT GI, (u8, see &enum nl80211_eht_gi)
+ * @NL80211_TXRATE_EHT_LTF: configure EHT LTF, (u8, see &enum nl80211_eht_ltf)
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -5493,6 +5573,9 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_HE,
NL80211_TXRATE_HE_GI,
NL80211_TXRATE_HE_LTF,
+ NL80211_TXRATE_EHT,
+ NL80211_TXRATE_EHT_GI,
+ NL80211_TXRATE_EHT_LTF,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -5525,6 +5608,15 @@ enum nl80211_txrate_gi {
NL80211_TXRATE_FORCE_LGI,
};
+#define NL80211_EHT_NSS_MAX 16
+/**
+ * struct nl80211_txrate_eht - EHT MCS/NSS txrate bitmap
+ * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.)
+ */
+struct nl80211_txrate_eht {
+ __u16 mcs[NL80211_EHT_NSS_MAX];
+};
+
/**
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
@@ -6649,6 +6741,9 @@ enum nl80211_feature_flags {
* (signaling and payload protected) A-MSDUs and this shall be advertised
* in the RSNXE.
*
+ * @NL80211_EXT_FEATURE_BEACON_RATE_EHT: Driver supports beacon rate
+ * configuration (AP/mesh) with EHT rates.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -6724,6 +6819,7 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_OWE_OFFLOAD_AP,
NL80211_EXT_FEATURE_DFS_CONCURRENT,
NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT,
+ NL80211_EXT_FEATURE_BEACON_RATE_EHT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -7279,6 +7375,105 @@ enum nl80211_nan_match_attributes {
};
/**
+ * enum nl80211_nan_band_conf_attributes - NAN band configuration attributes
+ * @__NL80211_NAN_BAND_CONF_INVALID: Invalid.
+ * @NL80211_NAN_BAND_CONF_BAND: Band for which the configuration is
+ * being set. The value is according to &enum nl80211_band (u8).
+ * @NL80211_NAN_BAND_CONF_FREQ: Discovery frequency. This attribute shall not
+ *	be present on 2.4 GHz band. On 5 GHz band its presence is optional.
+ * The allowed values are 5220 (channel 44) or 5745 (channel 149).
+ * If not present, channel 149 is used if allowed, otherwise channel 44
+ * will be selected. The value is in MHz (u16).
+ * @NL80211_NAN_BAND_CONF_RSSI_CLOSE: RSSI close threshold used for NAN state
+ * transition algorithm as described in chapters 3.3.6 and 3.3.7 "NAN
+ * Device Role and State Transition" of Wi-Fi Aware (TM) Specification
+ * v4.0. If not specified, default device value is used. The value should
+ * be greater than -60 dBm (s8).
+ * @NL80211_NAN_BAND_CONF_RSSI_MIDDLE: RSSI middle threshold used for NAN state
+ * transition algorithm as described in chapters 3.3.6 and 3.3.7 "NAN
+ * Device Role and State Transition" of Wi-Fi Aware (TM) Specification
+ * v4.0. If not present, default device value is used. The value should be
+ * greater than -75 dBm and less than %NL80211_NAN_BAND_CONF_RSSI_CLOSE
+ * (s8).
+ * @NL80211_NAN_BAND_CONF_WAKE_DW: Committed DW information (values 0-5).
+ * Value 0 means that the device will not wake up during the
+ * discovery window. Values 1-5 mean that the device will wake up
+ *	once every 2^(n - 1) discovery windows, where n is the value of
+ * this attribute. Setting this attribute to 0 is not allowed on
+ * 2.4 GHz band (u8). This is an optional parameter (default is 1).
+ * @NL80211_NAN_BAND_CONF_DISABLE_SCAN: Optional flag attribute to disable
+ * scanning (for cluster merge) on the band. If set, the device will not
+ * scan on this band anymore. Disabling scanning on 2.4 GHz band is not
+ * allowed.
+ * @NUM_NL80211_NAN_BAND_CONF_ATTR: Internal.
+ * @NL80211_NAN_BAND_CONF_ATTR_MAX: Highest NAN band configuration attribute.
+ *
+ * These attributes are used to configure NAN band-specific parameters. Note,
+ * that both RSSI attributes should be configured (or both left unset).
+ */
+enum nl80211_nan_band_conf_attributes {
+ __NL80211_NAN_BAND_CONF_INVALID,
+ NL80211_NAN_BAND_CONF_BAND,
+ NL80211_NAN_BAND_CONF_FREQ,
+ NL80211_NAN_BAND_CONF_RSSI_CLOSE,
+ NL80211_NAN_BAND_CONF_RSSI_MIDDLE,
+ NL80211_NAN_BAND_CONF_WAKE_DW,
+ NL80211_NAN_BAND_CONF_DISABLE_SCAN,
+
+ /* keep last */
+ NUM_NL80211_NAN_BAND_CONF_ATTR,
+ NL80211_NAN_BAND_CONF_ATTR_MAX = NUM_NL80211_NAN_BAND_CONF_ATTR - 1,
+};
+
+/**
+ * enum nl80211_nan_conf_attributes - NAN configuration attributes
+ * @__NL80211_NAN_CONF_INVALID: Invalid attribute, used for validation.
+ * @NL80211_NAN_CONF_CLUSTER_ID: ID for the NAN cluster. This is a MAC
+ * address that can take values from 50-6F-9A-01-00-00 to
+ * 50-6F-9A-01-FF-FF. This attribute is optional. If not present,
+ * a random Cluster ID will be chosen.
+ * @NL80211_NAN_CONF_EXTRA_ATTRS: Additional NAN attributes to be
+ * published in the beacons. This is an optional byte array.
+ * @NL80211_NAN_CONF_VENDOR_ELEMS: Vendor-specific elements that will
+ * be published in the beacons. This is an optional byte array.
+ * @NL80211_NAN_CONF_BAND_CONFIGS: This is a nested array attribute,
+ * containing multiple entries for each supported band. Each band
+ * configuration consists of &enum nl80211_nan_band_conf_attributes.
+ * @NL80211_NAN_CONF_SCAN_PERIOD: Scan period in seconds. If not configured,
+ * device default is used. Zero value will disable scanning.
+ * This is u16 (optional).
+ * @NL80211_NAN_CONF_SCAN_DWELL_TIME: Scan dwell time in TUs per channel.
+ *	Only non-zero values are valid. If not configured, the device
+ *	default value is used. This is u16 (optional).
+ * @NL80211_NAN_CONF_DISCOVERY_BEACON_INTERVAL: Discovery beacon interval
+ *	in TUs. Valid range is 50-200 TUs. If not configured, the device
+ *	default value is used. This is u8 (optional).
+ * @NL80211_NAN_CONF_NOTIFY_DW: If set, the driver will notify userspace about
+ * the upcoming discovery window with
+ * %NL80211_CMD_NAN_NEXT_DW_NOTIFICATION.
+ * This is a flag attribute.
+ * @NUM_NL80211_NAN_CONF_ATTR: Internal.
+ * @NL80211_NAN_CONF_ATTR_MAX: Highest NAN configuration attribute.
+ *
+ * These attributes are used to configure NAN-specific parameters.
+ */
+enum nl80211_nan_conf_attributes {
+ __NL80211_NAN_CONF_INVALID,
+ NL80211_NAN_CONF_CLUSTER_ID,
+ NL80211_NAN_CONF_EXTRA_ATTRS,
+ NL80211_NAN_CONF_VENDOR_ELEMS,
+ NL80211_NAN_CONF_BAND_CONFIGS,
+ NL80211_NAN_CONF_SCAN_PERIOD,
+ NL80211_NAN_CONF_SCAN_DWELL_TIME,
+ NL80211_NAN_CONF_DISCOVERY_BEACON_INTERVAL,
+ NL80211_NAN_CONF_NOTIFY_DW,
+
+ /* keep last */
+ NUM_NL80211_NAN_CONF_ATTR,
+ NL80211_NAN_CONF_ATTR_MAX = NUM_NL80211_NAN_CONF_ATTR - 1,
+};
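/*
 * Illustrative sketch (not part of the patch) using libnl-3: nesting an
 * extended NAN configuration into an NL80211_CMD_START_NAN message. Family
 * resolution and the mandatory attributes (wdev, master preference, ...)
 * are omitted.
 */
struct nlattr *cfg = nla_nest_start(msg, NL80211_ATTR_NAN_CONFIG);

nla_put_u16(msg, NL80211_NAN_CONF_SCAN_PERIOD, 120);
nla_put_u8(msg, NL80211_NAN_CONF_DISCOVERY_BEACON_INTERVAL, 100);
nla_put_flag(msg, NL80211_NAN_CONF_NOTIFY_DW);
nla_nest_end(msg, cfg);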
+
+/**
* enum nl80211_external_auth_action - Action to perform with external
* authentication request. Used by NL80211_ATTR_EXTERNAL_AUTH_ACTION.
* @NL80211_EXTERNAL_AUTH_START: Start the authentication.
@@ -8187,4 +8382,54 @@ enum nl80211_s1g_short_beacon_attrs {
__NL80211_S1G_SHORT_BEACON_ATTR_LAST - 1
};
+/**
+ * enum nl80211_nan_capabilities - NAN (Neighbor Aware Networking)
+ * capabilities.
+ *
+ * @__NL80211_NAN_CAPABILITIES_INVALID: Invalid.
+ * @NL80211_NAN_CAPA_CONFIGURABLE_SYNC: Flag attribute indicating that
+ * the device supports configurable synchronization. If set, the device
+ * should be able to handle %NL80211_ATTR_NAN_CONFIG
+ * attribute in the %NL80211_CMD_START_NAN (and change) command.
+ * @NL80211_NAN_CAPA_USERSPACE_DE: Flag attribute indicating that
+ * NAN Discovery Engine (DE) is not offloaded and the driver assumes
+ * user space DE implementation. When set, %NL80211_CMD_ADD_NAN_FUNCTION,
+ * %NL80211_CMD_DEL_NAN_FUNCTION and %NL80211_CMD_NAN_MATCH commands
+ * should not be used. In addition, the device/driver should support
+ * sending discovery window (DW) notifications using
+ * %NL80211_CMD_NAN_NEXT_DW_NOTIFICATION and handling transmission and
+ * reception of NAN SDF frames on NAN device interface during DW windows.
+ * (%NL80211_CMD_FRAME is used to transmit SDFs)
+ * @NL80211_NAN_CAPA_OP_MODE: u8 attribute indicating the supported operation
+ * modes as defined in Wi-Fi Aware (TM) specification Table 81 (Operation
+ * Mode field format).
+ * @NL80211_NAN_CAPA_NUM_ANTENNAS: u8 attribute indicating the number of
+ * TX and RX antennas supported by the device. Lower nibble indicates
+ * the number of TX antennas and upper nibble indicates the number of RX
+ * antennas. Value 0 indicates the information is not available.
+ * See table 79 of Wi-Fi Aware (TM) specification (Number of
+ * Antennas field).
+ * @NL80211_NAN_CAPA_MAX_CHANNEL_SWITCH_TIME: u16 attribute indicating the
+ * maximum time in microseconds that the device requires to switch
+ * channels.
+ * @NL80211_NAN_CAPA_CAPABILITIES: u8 attribute containing the
+ * capabilities of the device as defined in Wi-Fi Aware (TM)
+ * specification Table 79 (Capabilities field).
+ * @__NL80211_NAN_CAPABILITIES_LAST: Internal
+ * @NL80211_NAN_CAPABILITIES_MAX: Highest NAN capability attribute.
+ */
+enum nl80211_nan_capabilities {
+ __NL80211_NAN_CAPABILITIES_INVALID,
+
+ NL80211_NAN_CAPA_CONFIGURABLE_SYNC,
+ NL80211_NAN_CAPA_USERSPACE_DE,
+ NL80211_NAN_CAPA_OP_MODE,
+ NL80211_NAN_CAPA_NUM_ANTENNAS,
+ NL80211_NAN_CAPA_MAX_CHANNEL_SWITCH_TIME,
+ NL80211_NAN_CAPA_CAPABILITIES,
+ /* keep last */
+ __NL80211_NAN_CAPABILITIES_LAST,
+ NL80211_NAN_CAPABILITIES_MAX = __NL80211_NAN_CAPABILITIES_LAST - 1,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index ed3aed264aeb..51c4e8c82b1e 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -177,7 +177,17 @@ struct prctl_mm_map {
#define PR_GET_TID_ADDRESS 40
+/*
+ * Flags for PR_SET_THP_DISABLE are only applicable when disabling. Bit 0
+ * is reserved, so PR_GET_THP_DISABLE can return "1 | flags", to effectively
+ * return "1" when no flags were specified for PR_SET_THP_DISABLE.
+ */
#define PR_SET_THP_DISABLE 41
+/*
+ * Don't disable THPs when explicitly advised (e.g., MADV_HUGEPAGE /
+ * VM_HUGEPAGE, MADV_COLLAPSE).
+ */
+# define PR_THP_DISABLE_EXCEPT_ADVISED (1 << 1)
#define PR_GET_THP_DISABLE 42
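/*
 * Illustrative sketch (not part of the patch): disabling THP except where
 * explicitly advised. Passing the flags as the third prctl() argument is an
 * assumption based on the comment above.
 */
prctl(PR_SET_THP_DISABLE, 1, PR_THP_DISABLE_EXCEPT_ADVISED, 0, 0);

/* PR_GET_THP_DISABLE then returns 1 | PR_THP_DISABLE_EXCEPT_ADVISED */
int state = prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0);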
/*
diff --git a/include/uapi/linux/psp.h b/include/uapi/linux/psp.h
new file mode 100644
index 000000000000..607c42c39ba5
--- /dev/null
+++ b/include/uapi/linux/psp.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/psp.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_PSP_H
+#define _UAPI_LINUX_PSP_H
+
+#define PSP_FAMILY_NAME "psp"
+#define PSP_FAMILY_VERSION 1
+
+enum psp_version {
+ PSP_VERSION_HDR0_AES_GCM_128,
+ PSP_VERSION_HDR0_AES_GCM_256,
+ PSP_VERSION_HDR0_AES_GMAC_128,
+ PSP_VERSION_HDR0_AES_GMAC_256,
+};
+
+enum {
+ PSP_A_DEV_ID = 1,
+ PSP_A_DEV_IFINDEX,
+ PSP_A_DEV_PSP_VERSIONS_CAP,
+ PSP_A_DEV_PSP_VERSIONS_ENA,
+
+ __PSP_A_DEV_MAX,
+ PSP_A_DEV_MAX = (__PSP_A_DEV_MAX - 1)
+};
+
+enum {
+ PSP_A_ASSOC_DEV_ID = 1,
+ PSP_A_ASSOC_VERSION,
+ PSP_A_ASSOC_RX_KEY,
+ PSP_A_ASSOC_TX_KEY,
+ PSP_A_ASSOC_SOCK_FD,
+
+ __PSP_A_ASSOC_MAX,
+ PSP_A_ASSOC_MAX = (__PSP_A_ASSOC_MAX - 1)
+};
+
+enum {
+ PSP_A_KEYS_KEY = 1,
+ PSP_A_KEYS_SPI,
+
+ __PSP_A_KEYS_MAX,
+ PSP_A_KEYS_MAX = (__PSP_A_KEYS_MAX - 1)
+};
+
+enum {
+ PSP_CMD_DEV_GET = 1,
+ PSP_CMD_DEV_ADD_NTF,
+ PSP_CMD_DEV_DEL_NTF,
+ PSP_CMD_DEV_SET,
+ PSP_CMD_DEV_CHANGE_NTF,
+ PSP_CMD_KEY_ROTATE,
+ PSP_CMD_KEY_ROTATE_NTF,
+ PSP_CMD_RX_ASSOC,
+ PSP_CMD_TX_ASSOC,
+
+ __PSP_CMD_MAX,
+ PSP_CMD_MAX = (__PSP_CMD_MAX - 1)
+};
+
+#define PSP_MCGRP_MGMT "mgmt"
+#define PSP_MCGRP_USE "use"
+
+#endif /* _UAPI_LINUX_PSP_H */
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 2c3346e91dbe..46d45f902486 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -248,6 +248,10 @@ struct ptp_pin_desc {
_IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended)
#define PTP_MASK_CLEAR_ALL _IO(PTP_CLK_MAGIC, 19)
#define PTP_MASK_EN_SINGLE _IOW(PTP_CLK_MAGIC, 20, unsigned int)
+#define PTP_SYS_OFFSET_PRECISE_CYCLES \
+ _IOWR(PTP_CLK_MAGIC, 21, struct ptp_sys_offset_precise)
+#define PTP_SYS_OFFSET_EXTENDED_CYCLES \
+ _IOWR(PTP_CLK_MAGIC, 22, struct ptp_sys_offset_extended)
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occurred. */
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index b87df1b485c2..9a28f7d9a334 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -2,7 +2,9 @@
#ifndef _UAPI_LINUX_STDDEF_H
#define _UAPI_LINUX_STDDEF_H
+#ifdef __KERNEL__
#include <linux/compiler_types.h>
+#endif
#ifndef __always_inline
#define __always_inline inline
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index bdac8c42fa82..dce3113787a7 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -316,6 +316,15 @@ struct tcp_info {
* in milliseconds, including any
* unfinished recovery.
*/
+ __u32 tcpi_received_ce; /* # of CE marks received */
+ __u32 tcpi_delivered_e1_bytes; /* Accurate ECN byte counters */
+ __u32 tcpi_delivered_e0_bytes;
+ __u32 tcpi_delivered_ce_bytes;
+ __u32 tcpi_received_e1_bytes;
+ __u32 tcpi_received_e0_bytes;
+ __u32 tcpi_received_ce_bytes;
+ __u16 tcpi_accecn_fail_mode;
+ __u16 tcpi_accecn_opt_seen;
};
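/*
 * Illustrative sketch (not part of the patch): reading the new AccECN
 * counters. Older kernels return a shorter struct, so check the length.
 */
struct tcp_info info;
socklen_t len = sizeof(info);

if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &info, &len) == 0 &&
    len >= sizeof(info))
	printf("CE marks received: %u\n", info.tcpi_received_ce);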
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index d0430bee8292..386ad36f1a0a 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -42,14 +42,16 @@
#define TEE_IOC_MAGIC 0xa4
#define TEE_IOC_BASE 0
-#define TEE_MAX_ARG_SIZE 1024
+#define TEE_MAX_ARG_SIZE 4096
#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
#define TEE_GEN_CAP_PRIVILEGED (1 << 1)/* Privileged device (for supplicant) */
#define TEE_GEN_CAP_REG_MEM (1 << 2)/* Supports registering shared memory */
#define TEE_GEN_CAP_MEMREF_NULL (1 << 3)/* NULL MemRef support */
+#define TEE_GEN_CAP_OBJREF (1 << 4)/* Supports generic object reference */
-#define TEE_MEMREF_NULL (__u64)(-1) /* NULL MemRef Buffer */
+#define TEE_MEMREF_NULL ((__u64)(-1)) /* NULL MemRef Buffer */
+#define TEE_OBJREF_NULL ((__u64)(-1)) /* NULL ObjRef Object */
/*
* TEE Implementation ID
@@ -57,6 +59,7 @@
#define TEE_IMPL_ID_OPTEE 1
#define TEE_IMPL_ID_AMDTEE 2
#define TEE_IMPL_ID_TSTEE 3
+#define TEE_IMPL_ID_QTEE 4
/*
* OP-TEE specific capabilities
@@ -152,6 +155,20 @@ struct tee_ioctl_buf_data {
#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */
/*
+ * These define userspace buffer parameters.
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT 8
+#define TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT 9
+#define TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT 10 /* input and output */
+
+/*
+ * These define object reference parameters.
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT 11
+#define TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT 12
+#define TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT 13
+
+/*
* Mask for the type part of the attribute, leaves room for more types
*/
#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
@@ -186,14 +203,18 @@ struct tee_ioctl_buf_data {
/**
* struct tee_ioctl_param - parameter
* @attr: attributes
- * @a: if a memref, offset into the shared memory object, else a value parameter
- * @b: if a memref, size of the buffer, else a value parameter
+ * @a: if a memref, offset into the shared memory object,
+ * else if a ubuf, address of the user buffer,
+ * else if an objref, object identifier, else a value parameter
+ * @b: if a memref or ubuf, size of the buffer,
+ * else if objref, flags for the object, else a value parameter
* @c: if a memref, shared memory identifier, else a value parameter
*
- * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in
- * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and
- * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE
- * indicates that none of the members are used.
+ * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref, ubuf, or value is
+ * used in the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value,
+ * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref, TEE_PARAM_ATTR_TYPE_UBUF_*
+ * indicates ubuf, and TEE_PARAM_ATTR_TYPE_OBJREF_* indicates objref.
+ * TEE_PARAM_ATTR_TYPE_NONE indicates that none of the members are used.
*
* Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an
* identifier representing the shared memory object. A memref can reference
@@ -379,6 +400,37 @@ struct tee_ioctl_shm_register_data {
};
/**
+ * struct tee_ioctl_shm_register_fd_data - Shared memory registering argument
+ * @fd: [in] File descriptor identifying dmabuf reference
+ * @size: [out] Size of referenced memory
+ * @flags: [in] Flags to/from allocation.
+ * @id: [out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER_FD below.
+ */
+struct tee_ioctl_shm_register_fd_data {
+ __s64 fd;
+ __u64 size;
+ __u32 flags;
+ __s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_REGISTER_FD - register a shared memory from a file descriptor
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The returned file descriptor refers to the shared memory object in the
+ * kernel. The supplied file descriptor can be closed if it's not needed
+ * for other purposes. The shared memory is freed when the descriptor is
+ * closed.
+ */
+#define TEE_IOC_SHM_REGISTER_FD _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 8, \
+ struct tee_ioctl_shm_register_fd_data)
+
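/*
 * Illustrative sketch (not part of the patch): registering an existing
 * dma-buf with the TEE.
 */
struct tee_ioctl_shm_register_fd_data data = {
	.fd = dmabuf_fd,
	.flags = 0,
};
int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER_FD, &data);

if (shm_fd >= 0)
	close(dmabuf_fd);	/* data.id now identifies the shared memory */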
+/**
* TEE_IOC_SHM_REGISTER - Register shared memory argument
*
* Registers shared memory between the user space process and secure OS.
@@ -401,4 +453,23 @@ struct tee_ioctl_shm_register_data {
* munmap(): unmaps previously shared memory
*/
+/**
+ * struct tee_ioctl_object_invoke_arg - Invokes an object in a Trusted Application
+ * @id: [in] Object id
+ * @op: [in] Object operation, specific to the object
+ * @ret: [out] return value
+ * @num_params: [in] number of parameters following this struct
+ */
+struct tee_ioctl_object_invoke_arg {
+ __u64 id;
+ __u32 op;
+ __u32 ret;
+ __u32 num_params;
+	/* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+#define TEE_IOC_OBJECT_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 10, \
+ struct tee_ioctl_buf_data)
+
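/*
 * Illustrative sketch (not part of the patch): invoking operation `op` on
 * object `obj_id` with no parameters. struct tee_ioctl_buf_data is defined
 * earlier in this file.
 */
struct tee_ioctl_object_invoke_arg arg = {
	.id = obj_id,
	.op = op,
	.num_params = 0,
};
struct tee_ioctl_buf_data buf = {
	.buf_ptr = (__u64)(uintptr_t)&arg,
	.buf_len = sizeof(arg),
};

ioctl(tee_fd, TEE_IOC_OBJECT_INVOKE, &buf);
/* on success, arg.ret holds the return value from the object */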
#endif /*__TEE_H*/
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index f836512e9deb..2d30107e047e 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1193,7 +1193,7 @@ enum v4l2_flash_strobe_source {
#define V4L2_CID_JPEG_CLASS_BASE (V4L2_CTRL_CLASS_JPEG | 0x900)
#define V4L2_CID_JPEG_CLASS (V4L2_CTRL_CLASS_JPEG | 1)
-#define V4L2_CID_JPEG_CHROMA_SUBSAMPLING (V4L2_CID_JPEG_CLASS_BASE + 1)
+#define V4L2_CID_JPEG_CHROMA_SUBSAMPLING (V4L2_CID_JPEG_CLASS_BASE + 1)
enum v4l2_jpeg_chroma_subsampling {
V4L2_JPEG_CHROMA_SUBSAMPLING_444 = 0,
V4L2_JPEG_CHROMA_SUBSAMPLING_422 = 1,
@@ -1202,15 +1202,15 @@ enum v4l2_jpeg_chroma_subsampling {
V4L2_JPEG_CHROMA_SUBSAMPLING_410 = 4,
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY = 5,
};
-#define V4L2_CID_JPEG_RESTART_INTERVAL (V4L2_CID_JPEG_CLASS_BASE + 2)
-#define V4L2_CID_JPEG_COMPRESSION_QUALITY (V4L2_CID_JPEG_CLASS_BASE + 3)
+#define V4L2_CID_JPEG_RESTART_INTERVAL (V4L2_CID_JPEG_CLASS_BASE + 2)
+#define V4L2_CID_JPEG_COMPRESSION_QUALITY (V4L2_CID_JPEG_CLASS_BASE + 3)
-#define V4L2_CID_JPEG_ACTIVE_MARKER (V4L2_CID_JPEG_CLASS_BASE + 4)
-#define V4L2_JPEG_ACTIVE_MARKER_APP0 (1 << 0)
-#define V4L2_JPEG_ACTIVE_MARKER_APP1 (1 << 1)
-#define V4L2_JPEG_ACTIVE_MARKER_COM (1 << 16)
-#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
-#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
+#define V4L2_CID_JPEG_ACTIVE_MARKER (V4L2_CID_JPEG_CLASS_BASE + 4)
+#define V4L2_JPEG_ACTIVE_MARKER_APP0 (1 << 0)
+#define V4L2_JPEG_ACTIVE_MARKER_APP1 (1 << 1)
+#define V4L2_JPEG_ACTIVE_MARKER_COM (1 << 16)
+#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
+#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
/* Image source controls */
@@ -1243,10 +1243,10 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
#define V4L2_CID_DV_CLASS (V4L2_CTRL_CLASS_DV | 1)
-#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1)
-#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2)
-#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3)
-#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4)
+#define V4L2_CID_DV_TX_HOTPLUG (V4L2_CID_DV_CLASS_BASE + 1)
+#define V4L2_CID_DV_TX_RXSENSE (V4L2_CID_DV_CLASS_BASE + 2)
+#define V4L2_CID_DV_TX_EDID_PRESENT (V4L2_CID_DV_CLASS_BASE + 3)
+#define V4L2_CID_DV_TX_MODE (V4L2_CID_DV_CLASS_BASE + 4)
enum v4l2_dv_tx_mode {
V4L2_DV_TX_MODE_DVI_D = 0,
V4L2_DV_TX_MODE_HDMI = 1,
@@ -1267,7 +1267,7 @@ enum v4l2_dv_it_content_type {
V4L2_DV_IT_CONTENT_TYPE_NO_ITC = 4,
};
-#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100)
+#define V4L2_CID_DV_RX_POWER_PRESENT (V4L2_CID_DV_CLASS_BASE + 100)
#define V4L2_CID_DV_RX_RGB_RANGE (V4L2_CID_DV_CLASS_BASE + 101)
#define V4L2_CID_DV_RX_IT_CONTENT_TYPE (V4L2_CID_DV_CLASS_BASE + 102)
@@ -1537,15 +1537,6 @@ struct v4l2_ctrl_h264_pred_weights {
struct v4l2_h264_weight_factors weight_factors[2];
};
-#define V4L2_H264_SLICE_TYPE_P 0
-#define V4L2_H264_SLICE_TYPE_B 1
-#define V4L2_H264_SLICE_TYPE_I 2
-#define V4L2_H264_SLICE_TYPE_SP 3
-#define V4L2_H264_SLICE_TYPE_SI 4
-
-#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01
-#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02
-
#define V4L2_H264_TOP_FIELD_REF 0x1
#define V4L2_H264_BOTTOM_FIELD_REF 0x2
#define V4L2_H264_FRAME_REF 0x3
@@ -1566,8 +1557,17 @@ struct v4l2_h264_reference {
* Maximum DPB size, as specified by section 'A.3.1 Level limits
* common to the Baseline, Main, and Extended profiles'.
*/
-#define V4L2_H264_NUM_DPB_ENTRIES 16
-#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES)
+#define V4L2_H264_NUM_DPB_ENTRIES 16
+#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES)
+
+#define V4L2_H264_SLICE_TYPE_P 0
+#define V4L2_H264_SLICE_TYPE_B 1
+#define V4L2_H264_SLICE_TYPE_I 2
+#define V4L2_H264_SLICE_TYPE_SP 3
+#define V4L2_H264_SLICE_TYPE_SI 4
+
+#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01
+#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02
#define V4L2_CID_STATELESS_H264_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 6)
/**
@@ -1707,7 +1707,6 @@ struct v4l2_ctrl_h264_decode_params {
__u32 flags;
};
-
/* Stateless FWHT control, used by the vicodec driver */
/* Current FWHT version */
@@ -2549,44 +2548,10 @@ struct v4l2_ctrl_hevc_scaling_matrix {
__u8 scaling_list_dc_coef_32x32[2];
};
-#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
-#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
-
-#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0)
-
-struct v4l2_ctrl_hdr10_cll_info {
- __u16 max_content_light_level;
- __u16 max_pic_average_light_level;
-};
-
-#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1)
-
-#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5
-#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000
-#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5
-#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000
-#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5
-#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000
-#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5
-#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000
-#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000
-#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000
-#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1
-#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000
-
-struct v4l2_ctrl_hdr10_mastering_display {
- __u16 display_primaries_x[3];
- __u16 display_primaries_y[3];
- __u16 white_point_x;
- __u16 white_point_y;
- __u32 max_display_mastering_luminance;
- __u32 min_display_mastering_luminance;
-};
-
/* Stateless VP9 controls */
#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED 0x1
-#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
+#define V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE 0x2
/**
* struct v4l2_vp9_loop_filter - VP9 loop filter parameters
@@ -3515,4 +3480,38 @@ struct v4l2_ctrl_av1_film_grain {
#define V4L2_CID_MPEG_MFC51_BASE V4L2_CID_CODEC_MFC51_BASE
#endif
+#define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900)
+#define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1)
+
+#define V4L2_CID_COLORIMETRY_HDR10_CLL_INFO (V4L2_CID_COLORIMETRY_CLASS_BASE + 0)
+
+struct v4l2_ctrl_hdr10_cll_info {
+ __u16 max_content_light_level;
+ __u16 max_pic_average_light_level;
+};
+
+#define V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY (V4L2_CID_COLORIMETRY_CLASS_BASE + 1)
+
+#define V4L2_HDR10_MASTERING_PRIMARIES_X_LOW 5
+#define V4L2_HDR10_MASTERING_PRIMARIES_X_HIGH 37000
+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_LOW 5
+#define V4L2_HDR10_MASTERING_PRIMARIES_Y_HIGH 42000
+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_LOW 5
+#define V4L2_HDR10_MASTERING_WHITE_POINT_X_HIGH 37000
+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_LOW 5
+#define V4L2_HDR10_MASTERING_WHITE_POINT_Y_HIGH 42000
+#define V4L2_HDR10_MASTERING_MAX_LUMA_LOW 50000
+#define V4L2_HDR10_MASTERING_MAX_LUMA_HIGH 100000000
+#define V4L2_HDR10_MASTERING_MIN_LUMA_LOW 1
+#define V4L2_HDR10_MASTERING_MIN_LUMA_HIGH 50000
+
+struct v4l2_ctrl_hdr10_mastering_display {
+ __u16 display_primaries_x[3];
+ __u16 display_primaries_y[3];
+ __u16 white_point_x;
+ __u16 white_point_y;
+ __u32 max_display_mastering_luminance;
+ __u32 min_display_mastering_luminance;
+};
+
#endif
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 44a16e0e5a12..58f478f98a35 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -2,7 +2,7 @@
/*
* V4L2 DV timings header.
*
- * Copyright (C) 2012-2016 Hans Verkuil <hansverk@cisco.com>
+ * Copyright (C) 2012-2016 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _V4L2_DV_TIMINGS_H
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 3dd9fa45dde1..becd08fdbddb 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -51,7 +51,7 @@
*
* Author: Bill Dirks <bill@thedirks.org>
* Justin Schoeman
- * Hans Verkuil <hverkuil@xs4all.nl>
+ * Hans Verkuil <hverkuil@kernel.org>
* et al.
*/
#ifndef _UAPI__LINUX_VIDEODEV2_H
@@ -1607,8 +1607,8 @@ struct v4l2_bt_timings {
} __attribute__ ((packed));
/* Interlaced or progressive format */
-#define V4L2_DV_PROGRESSIVE 0
-#define V4L2_DV_INTERLACED 1
+#define V4L2_DV_PROGRESSIVE 0
+#define V4L2_DV_INTERLACED 1
/* Polarities. If bit is not set, it is assumed to be negative polarity */
#define V4L2_DV_VSYNC_POS_POL 0x00000001
@@ -2788,15 +2788,15 @@ struct v4l2_remove_buffers {
* Only implemented if CONFIG_VIDEO_ADV_DEBUG is defined.
* You must be root to use these ioctls. Never use these in applications!
*/
-#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
-#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
+#define VIDIOC_DBG_S_REGISTER _IOW('V', 79, struct v4l2_dbg_register)
+#define VIDIOC_DBG_G_REGISTER _IOWR('V', 80, struct v4l2_dbg_register)
#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
-#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
-#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
-#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
-#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
-#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
+#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
+#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
+#define VIDIOC_DQEVENT _IOR('V', 89, struct v4l2_event)
+#define VIDIOC_SUBSCRIBE_EVENT _IOW('V', 90, struct v4l2_event_subscription)
+#define VIDIOC_UNSUBSCRIBE_EVENT _IOW('V', 91, struct v4l2_event_subscription)
#define VIDIOC_CREATE_BUFS _IOWR('V', 92, struct v4l2_create_buffers)
#define VIDIOC_PREPARE_BUF _IOWR('V', 93, struct v4l2_buffer)
#define VIDIOC_G_SELECTION _IOWR('V', 94, struct v4l2_selection)
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index fe15bc7e9f70..89e6a3f13191 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -255,6 +255,7 @@ enum rdma_driver_id {
RDMA_DRIVER_SIW,
RDMA_DRIVER_ERDMA,
RDMA_DRIVER_MANA,
+ RDMA_DRIVER_IONIC,
};
enum ib_uverbs_gid_type {
diff --git a/include/uapi/rdma/ib_user_sa.h b/include/uapi/rdma/ib_user_sa.h
index 435155d6e1c6..acfa20816bc6 100644
--- a/include/uapi/rdma/ib_user_sa.h
+++ b/include/uapi/rdma/ib_user_sa.h
@@ -74,4 +74,18 @@ struct ib_user_path_rec {
__u8 preference;
};
+struct ib_user_service_rec {
+ __be64 id;
+ __u8 gid[16];
+ __be16 pkey;
+ __u8 reserved[2];
+ __be32 lease;
+ __u8 key[16];
+ __u8 name[64];
+ __u8 data_8[16];
+ __be16 data_16[8];
+ __be32 data_32[4];
+ __be64 data_64[2];
+};
+
#endif /* IB_USER_SA_H */
diff --git a/include/uapi/rdma/ionic-abi.h b/include/uapi/rdma/ionic-abi.h
new file mode 100644
index 000000000000..7b589d3e9728
--- /dev/null
+++ b/include/uapi/rdma/ionic-abi.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc */
+
+#ifndef IONIC_ABI_H
+#define IONIC_ABI_H
+
+#include <linux/types.h>
+
+#define IONIC_ABI_VERSION 1
+
+#define IONIC_EXPDB_64 1
+#define IONIC_EXPDB_128 2
+#define IONIC_EXPDB_256 4
+#define IONIC_EXPDB_512 8
+
+#define IONIC_EXPDB_SQ 1
+#define IONIC_EXPDB_RQ 2
+
+#define IONIC_CMB_ENABLE 1
+#define IONIC_CMB_REQUIRE 2
+#define IONIC_CMB_EXPDB 4
+#define IONIC_CMB_WC 8
+#define IONIC_CMB_UC 16
+
+struct ionic_ctx_req {
+ __u32 rsvd[2];
+};
+
+struct ionic_ctx_resp {
+ __u32 rsvd;
+ __u32 page_shift;
+
+ __aligned_u64 dbell_offset;
+
+ __u16 version;
+ __u8 qp_opcodes;
+ __u8 admin_opcodes;
+
+ __u8 sq_qtype;
+ __u8 rq_qtype;
+ __u8 cq_qtype;
+ __u8 admin_qtype;
+
+ __u8 max_stride;
+ __u8 max_spec;
+ __u8 udma_count;
+ __u8 expdb_mask;
+ __u8 expdb_qtypes;
+
+ __u8 rsvd2[3];
+};
+
+struct ionic_qdesc {
+ __aligned_u64 addr;
+ __u32 size;
+ __u16 mask;
+ __u8 depth_log2;
+ __u8 stride_log2;
+};
+
+struct ionic_ah_resp {
+ __u32 ahid;
+ __u32 pad;
+};
+
+struct ionic_cq_req {
+ struct ionic_qdesc cq[2];
+ __u8 udma_mask;
+ __u8 rsvd[7];
+};
+
+struct ionic_cq_resp {
+ __u32 cqid[2];
+ __u8 udma_mask;
+ __u8 rsvd[7];
+};
+
+struct ionic_qp_req {
+ struct ionic_qdesc sq;
+ struct ionic_qdesc rq;
+ __u8 sq_spec;
+ __u8 rq_spec;
+ __u8 sq_cmb;
+ __u8 rq_cmb;
+ __u8 udma_mask;
+ __u8 rsvd[3];
+};
+
+struct ionic_qp_resp {
+ __u32 qpid;
+ __u8 sq_cmb;
+ __u8 rq_cmb;
+ __u8 udma_idx;
+ __u8 rsvd[1];
+ __aligned_u64 sq_cmb_offset;
+ __aligned_u64 rq_cmb_offset;
+};
+
+struct ionic_srq_req {
+ struct ionic_qdesc rq;
+ __u8 rq_spec;
+ __u8 rq_cmb;
+ __u8 udma_mask;
+ __u8 rsvd[5];
+};
+
+struct ionic_srq_resp {
+ __u32 qpid;
+ __u8 rq_cmb;
+ __u8 udma_idx;
+ __u8 rsvd[2];
+ __aligned_u64 rq_cmb_offset;
+};
+
+#endif /* IONIC_ABI_H */
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index bb18f15489e3..f7788d33376b 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -20,11 +20,14 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_MEM = 0,
IRDMA_MEMREG_TYPE_QP = 1,
IRDMA_MEMREG_TYPE_CQ = 2,
+ IRDMA_MEMREG_TYPE_SRQ = 3,
};
enum {
IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+ IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA = 1 << 2,
+ IRDMA_SUPPORT_WQE_FORMAT_V2 = 1 << 3,
};
struct irdma_alloc_ucontext_req {
@@ -54,7 +57,8 @@ struct irdma_alloc_ucontext_resp {
__u8 rsvd2;
__aligned_u64 comp_mask;
__u16 min_hw_wq_size;
- __u8 rsvd3[6];
+ __u32 max_hw_srq_quanta;
+ __u8 rsvd3[2];
};
struct irdma_alloc_pd_resp {
@@ -71,6 +75,16 @@ struct irdma_create_cq_req {
__aligned_u64 user_shadow_area;
};
+struct irdma_create_srq_req {
+ __aligned_u64 user_srq_buf;
+ __aligned_u64 user_shadow_area;
+};
+
+struct irdma_create_srq_resp {
+ __u32 srq_id;
+ __u32 srq_size;
+};
+
struct irdma_create_qp_req {
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 7cea03581f79..5ded174687ee 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -67,7 +67,9 @@ enum {
RDMA_USER_CM_CMD_QUERY,
RDMA_USER_CM_CMD_BIND,
RDMA_USER_CM_CMD_RESOLVE_ADDR,
- RDMA_USER_CM_CMD_JOIN_MCAST
+ RDMA_USER_CM_CMD_JOIN_MCAST,
+ RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE,
+ RDMA_USER_CM_CMD_WRITE_CM_EVENT,
};
 /* See IBTA Annex A11, service ID bytes 4 & 5 */
@@ -147,7 +149,8 @@ struct rdma_ucm_resolve_route {
enum {
RDMA_USER_CM_QUERY_ADDR,
RDMA_USER_CM_QUERY_PATH,
- RDMA_USER_CM_QUERY_GID
+ RDMA_USER_CM_QUERY_GID,
+ RDMA_USER_CM_QUERY_IB_SERVICE
};
struct rdma_ucm_query {
@@ -187,6 +190,11 @@ struct rdma_ucm_query_path_resp {
struct ib_path_rec_data path_data[];
};
+struct rdma_ucm_query_ib_service_resp {
+ __u32 num_service_recs;
+ struct ib_user_service_rec recs[];
+};
+
struct rdma_ucm_conn_param {
__u32 qp_num;
__u32 qkey;
@@ -297,6 +305,7 @@ struct rdma_ucm_event_resp {
union {
struct rdma_ucm_conn_param conn;
struct rdma_ucm_ud_param ud;
+ __u32 arg32[2];
} param;
__u32 reserved;
struct rdma_ucm_ece ece;
@@ -338,4 +347,33 @@ struct rdma_ucm_migrate_resp {
__u32 events_reported;
};
+enum {
+ RDMA_USER_CM_IB_SERVICE_FLAG_ID = 1 << 0,
+ RDMA_USER_CM_IB_SERVICE_FLAG_NAME = 1 << 1,
+};
+
+#define RDMA_USER_CM_IB_SERVICE_NAME_SIZE 64
+struct rdma_ucm_ib_service {
+ __u64 service_id;
+ __u8 service_name[RDMA_USER_CM_IB_SERVICE_NAME_SIZE];
+ __u32 flags;
+ __u32 reserved;
+};
+
+struct rdma_ucm_resolve_ib_service {
+ __u32 id;
+ struct rdma_ucm_ib_service ibs;
+};
+
+struct rdma_ucm_write_cm_event {
+ __u32 id;
+ __u32 reserved;
+ __u32 event;
+ __u32 status;
+ union {
+ struct rdma_ucm_conn_param conn;
+ struct rdma_ucm_ud_param ud;
+ __u64 arg;
+ } param;
+};
#endif /* RDMA_USER_CM_H */
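The two RDMA_USER_CM_IB_SERVICE_FLAG_* bits select the lookup key for RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE: by service_id, by service_name, or both. A sketch of a by-name request (cm_id and name come from the caller; illustrative):

    #include <string.h>
    #include <rdma/rdma_user_cm.h>

    static void fill_resolve_by_name(struct rdma_ucm_resolve_ib_service *req,
                                     __u32 cm_id, const char *name)
    {
            memset(req, 0, sizeof(*req));
            req->id = cm_id;
            req->ibs.flags = RDMA_USER_CM_IB_SERVICE_FLAG_NAME;
            strncpy((char *)req->ibs.service_name, name,
                    RDMA_USER_CM_IB_SERVICE_NAME_SIZE - 1);
    }
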
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 16782c360de3..019096beb179 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -11,6 +11,12 @@
#include <linux/types.h>
#include <asm/byteorder.h>
+#ifdef __KERNEL__
+#include <linux/stddef.h> /* for offsetof */
+#else
+#include <stddef.h> /* for offsetof */
+#endif
+
/*
* Fibre Channel Switch - Enhanced Link Services definitions.
* From T11 FC-LS Rev 1.2 June 7, 2005.
@@ -1109,12 +1115,15 @@ struct fc_els_fpin {
/* Diagnostic Function Descriptor - FPIN Registration */
struct fc_df_desc_fpin_reg {
- __be32 desc_tag; /* FPIN Registration (0x00030001) */
- __be32 desc_len; /* Length of Descriptor (in bytes).
- * Size of descriptor excluding
- * desc_tag and desc_len fields.
- */
- __be32 count; /* Number of desc_tags elements */
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(fc_df_desc_fpin_reg_hdr, __hdr, /* no attrs */,
+ __be32 desc_tag; /* FPIN Registration (0x00030001) */
+ __be32 desc_len; /* Length of Descriptor (in bytes).
+ * Size of descriptor excluding
+ * desc_tag and desc_len fields.
+ */
+ __be32 count; /* Number of desc_tags elements */
+ );
__be32 desc_tags[]; /* Array of Descriptor Tags.
* Each tag indicates a function
* supported by the N_Port (request)
@@ -1124,33 +1133,44 @@ struct fc_df_desc_fpin_reg {
* See ELS_FN_DTAG_xxx for tag values.
*/
};
+_Static_assert(offsetof(struct fc_df_desc_fpin_reg, desc_tags) == sizeof(struct fc_df_desc_fpin_reg_hdr),
+ "struct member likely outside of __struct_group()");
/*
* ELS_RDF - Register Diagnostic Functions
*/
struct fc_els_rdf {
- __u8 fpin_cmd; /* command (0x19) */
- __u8 fpin_zero[3]; /* specified as zero - part of cmd */
- __be32 desc_len; /* Length of Descriptor List (in bytes).
- * Size of ELS excluding fpin_cmd,
- * fpin_zero and desc_len fields.
- */
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(fc_els_rdf_hdr, __hdr, /* no attrs */,
+ __u8 fpin_cmd; /* command (0x19) */
+ __u8 fpin_zero[3]; /* specified as zero - part of cmd */
+ __be32 desc_len; /* Length of Descriptor List (in bytes).
+ * Size of ELS excluding fpin_cmd,
+ * fpin_zero and desc_len fields.
+ */
+ );
struct fc_tlv_desc desc[]; /* Descriptor list */
};
+_Static_assert(offsetof(struct fc_els_rdf, desc) == sizeof(struct fc_els_rdf_hdr),
+ "struct member likely outside of __struct_group()");
/*
* ELS RDF LS_ACC Response.
*/
struct fc_els_rdf_resp {
- struct fc_els_ls_acc acc_hdr;
- __be32 desc_list_len; /* Length of response (in
- * bytes). Excludes acc_hdr
- * and desc_list_len fields.
- */
- struct fc_els_lsri_desc lsri;
+ /* New members MUST be added within the __struct_group() macro below. */
+ __struct_group(fc_els_rdf_resp_hdr, __hdr, /* no attrs */,
+ struct fc_els_ls_acc acc_hdr;
+ __be32 desc_list_len; /* Length of response (in
+ * bytes). Excludes acc_hdr
+ * and desc_list_len fields.
+ */
+ struct fc_els_lsri_desc lsri;
+ );
struct fc_tlv_desc desc[]; /* Supported Descriptor list */
};
-
+_Static_assert(offsetof(struct fc_els_rdf_resp, desc) == sizeof(struct fc_els_rdf_resp_hdr),
+ "struct member likely outside of __struct_group()");
/*
* Diagnostic Capability Descriptors for EDC ELS
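The __struct_group() conversions above give each flexible-array struct a named header type (fc_df_desc_fpin_reg_hdr, fc_els_rdf_hdr, fc_els_rdf_resp_hdr), and each _Static_assert verifies that the flexible member starts exactly where the header ends. Callers can then bound header copies by type instead of hand-counted offsets; a sketch (the buffer handling is illustrative):

    #include <string.h>
    #include <scsi/fc/fc_els.h>

    /* Copy only the fixed part of an RDF frame out of a raw buffer. */
    static void read_rdf_hdr(struct fc_els_rdf_hdr *hdr, const void *buf)
    {
            /* sizeof(*hdr) covers exactly the members before desc[] */
            memcpy(hdr, buf, sizeof(*hdr));
    }
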
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d62eb93af0ed..b610683fd8db 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -13,8 +13,7 @@
#include <sound/asound.h>
#include <sound/compress_params.h>
-
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 3, 0)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 4, 1)
/**
* struct snd_compressed_buffer - compressed buffer
* @fragment_size: size of buffer fragment in bytes
@@ -57,6 +56,25 @@ struct snd_compr_tstamp {
} __attribute__((packed, aligned(4)));
/**
+ * struct snd_compr_tstamp64 - timestamp descriptor with fields in 64 bit
+ * @byte_offset: Byte offset in ring buffer to DSP
+ * @copied_total: Total number of bytes copied to/from the ring buffer by the DSP
+ * @pcm_frames: Frames decoded or encoded by DSP. This field will evolve by
+ * large steps and should only be used to monitor encoding/decoding
+ * progress. It shall not be used for timing estimates.
+ * @pcm_io_frames: Frames rendered or received by DSP into a mixer or an audio
+ * output/input. This field should be used for A/V sync or time estimates.
+ * @sampling_rate: sampling rate of audio
+ */
+struct snd_compr_tstamp64 {
+ __u32 byte_offset;
+ __u64 copied_total;
+ __u64 pcm_frames;
+ __u64 pcm_io_frames;
+ __u32 sampling_rate;
+} __attribute__((packed, aligned(4)));
+
+/**
* struct snd_compr_avail - avail descriptor
* @avail: Number of bytes available in ring buffer for writing/reading
* @tstamp: timestamp information
@@ -66,6 +84,16 @@ struct snd_compr_avail {
struct snd_compr_tstamp tstamp;
} __attribute__((packed, aligned(4)));
+/**
+ * struct snd_compr_avail64 - avail descriptor with tstamp in 64 bit format
+ * @avail: Number of bytes available in ring buffer for writing/reading
+ * @tstamp: timestamp information
+ */
+struct snd_compr_avail64 {
+ __u64 avail;
+ struct snd_compr_tstamp64 tstamp;
+} __attribute__((packed, aligned(4)));
+
enum snd_compr_direction {
SND_COMPRESS_PLAYBACK = 0,
SND_COMPRESS_CAPTURE,
@@ -189,6 +217,7 @@ struct snd_compr_task_status {
 * Note: only codec params can be changed at runtime; stream params can't be
* SNDRV_COMPRESS_GET_PARAMS: Query codec params
* SNDRV_COMPRESS_TSTAMP: get the current timestamp value
+ * SNDRV_COMPRESS_TSTAMP64: get the current timestamp value in 64 bit format
* SNDRV_COMPRESS_AVAIL: get the current buffer avail value.
* This also queries the tstamp properties
* SNDRV_COMPRESS_PAUSE: Pause the running stream
@@ -211,6 +240,8 @@ struct snd_compr_task_status {
struct snd_compr_metadata)
#define SNDRV_COMPRESS_TSTAMP _IOR('C', 0x20, struct snd_compr_tstamp)
#define SNDRV_COMPRESS_AVAIL _IOR('C', 0x21, struct snd_compr_avail)
+#define SNDRV_COMPRESS_TSTAMP64 _IOR('C', 0x22, struct snd_compr_tstamp64)
+#define SNDRV_COMPRESS_AVAIL64 _IOR('C', 0x23, struct snd_compr_avail64)
#define SNDRV_COMPRESS_PAUSE _IO('C', 0x30)
#define SNDRV_COMPRESS_RESUME _IO('C', 0x31)
#define SNDRV_COMPRESS_START _IO('C', 0x32)
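A user-space sketch of the new 64-bit timestamp query (cfd is an already-open compress device fd; illustrative):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sound/compress_offload.h>

    static int print_position(int cfd)
    {
            struct snd_compr_tstamp64 ts;

            if (ioctl(cfd, SNDRV_COMPRESS_TSTAMP64, &ts) < 0)
                    return -1;
            /* pcm_io_frames is the field intended for A/V sync estimates */
            printf("rendered %llu frames at %u Hz\n",
                   (unsigned long long)ts.pcm_io_frames, ts.sampling_rate);
            return 0;
    }
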
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index bc7648a30746..d7db6b4e1166 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -43,7 +43,8 @@
#define SND_AUDIOCODEC_BESPOKE ((__u32) 0x0000000E)
#define SND_AUDIOCODEC_ALAC ((__u32) 0x0000000F)
#define SND_AUDIOCODEC_APE ((__u32) 0x00000010)
-#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_APE
+#define SND_AUDIOCODEC_OPUS_RAW ((__u32) 0x00000011)
+#define SND_AUDIOCODEC_MAX SND_AUDIOCODEC_OPUS_RAW
/*
* Profile and modes are listed with bit masks. This allows for a
@@ -324,6 +325,43 @@ struct snd_dec_ape {
__u32 seek_table_present;
} __attribute__((packed, aligned(4)));
+/**
+ * struct snd_dec_opus - Opus decoder parameters (raw opus packets)
+ * @version: Usually should be '1' but can be split into major (4 upper bits)
+ * and minor (4 lower bits) sub-fields.
+ * @num_channels: Number of output channels.
+ * @pre_skip: Number of samples to discard at 48 kHz.
+ * @sample_rate: Sample rate of original input.
+ * @output_gain: Gain to apply when decoding (in Q7.8 format).
+ * @mapping_family: Order and meaning of output channels. Only values 0 and 1
+ * are expected; values 2..255 are not recommended for playback.
+ *
+ * @chan_map: Optional channel mapping table. Describes mapping of opus streams
+ * to decoded channels. Fields:
+ * @chan_map.stream_count: Number of streams encoded in each Ogg packet.
+ * @chan_map.coupled_count: Number of streams whose decoders are used
+ * for two channels.
+ * @chan_map.channel_map: Which decoded channel to use for each output channel.
+ * Supports only mapping families 0 and 1,
+ * max number of channels is 8.
+ *
+ * These options were extracted from RFC7845 Section 5.
+ */
+struct snd_dec_opus {
+ __u8 version;
+ __u8 num_channels;
+ __u16 pre_skip;
+ __u32 sample_rate;
+ __u16 output_gain;
+ __u8 mapping_family;
+ struct snd_dec_opus_ch_map {
+ __u8 stream_count;
+ __u8 coupled_count;
+ __u8 channel_map[8];
+ } chan_map;
+} __attribute__((packed, aligned(4)));
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
@@ -334,6 +372,7 @@ union snd_codec_options {
struct snd_dec_wma wma_d;
struct snd_dec_alac alac_d;
struct snd_dec_ape ape_d;
+ struct snd_dec_opus opus_d;
struct {
__u32 out_sample_rate;
} src_d;
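Populating the new descriptor from an OpusHead packet might look like this sketch (the stereo, mapping-family-0 values are illustrative defaults, not requirements):

    #include <sound/compress_params.h>

    static void set_opus_stereo_defaults(union snd_codec_options *opts)
    {
            struct snd_dec_opus *opus = &opts->opus_d;

            opus->version = 1;        /* major 0, minor 1 */
            opus->num_channels = 2;
            opus->pre_skip = 312;     /* samples to drop at 48 kHz */
            opus->sample_rate = 48000;
            opus->output_gain = 0;    /* Q7.8, i.e. 0 dB */
            opus->mapping_family = 0; /* mono/stereo, no mapping table */
    }
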
diff --git a/include/uapi/sound/intel/avs/tokens.h b/include/uapi/sound/intel/avs/tokens.h
index c9f845b3c523..f3ff6aae09a9 100644
--- a/include/uapi/sound/intel/avs/tokens.h
+++ b/include/uapi/sound/intel/avs/tokens.h
@@ -133,6 +133,21 @@ enum avs_tplg_token {
AVS_TKN_PATH_FE_FMT_ID_U32 = 1902,
AVS_TKN_PATH_BE_FMT_ID_U32 = 1903,
+ /* struct avs_tplg_path_template (conditional) */
+ AVS_TKN_CONDPATH_TMPL_ID_U32 = 1801,
+ AVS_TKN_CONDPATH_TMPL_SOURCE_TPLG_NAME_STRING = 2002,
+ AVS_TKN_CONDPATH_TMPL_SOURCE_PATH_TMPL_ID_U32 = 2003,
+ AVS_TKN_CONDPATH_TMPL_SINK_TPLG_NAME_STRING = 2004,
+ AVS_TKN_CONDPATH_TMPL_SINK_PATH_TMPL_ID_U32 = 2005,
+ AVS_TKN_CONDPATH_TMPL_COND_TYPE_U32 = 2006,
+ AVS_TKN_CONDPATH_TMPL_OVERRIDABLE_BOOL = 2007,
+ AVS_TKN_CONDPATH_TMPL_PRIORITY_U8 = 2008,
+
+ /* struct avs_tplg_path (conditional) */
+ AVS_TKN_CONDPATH_ID_U32 = 1901,
+ AVS_TKN_CONDPATH_SOURCE_PATH_ID_U32 = 2102,
+ AVS_TKN_CONDPATH_SINK_PATH_ID_U32 = 2103,
+
/* struct avs_tplg_pin_format */
AVS_TKN_PIN_FMT_INDEX_U32 = 2201,
AVS_TKN_PIN_FMT_IOBS_U32 = 2202,
diff --git a/include/uapi/sound/snd_ar_tokens.h b/include/uapi/sound/snd_ar_tokens.h
index b9b9093b4396..6b8102eaa121 100644
--- a/include/uapi/sound/snd_ar_tokens.h
+++ b/include/uapi/sound/snd_ar_tokens.h
@@ -3,6 +3,8 @@
#ifndef __SND_AR_TOKENS_H__
#define __SND_AR_TOKENS_H__
+#include <linux/types.h>
+
#define APM_SUB_GRAPH_PERF_MODE_LOW_POWER 0x1
#define APM_SUB_GRAPH_PERF_MODE_LOW_LATENCY 0x2
@@ -118,6 +120,12 @@ enum ar_event_types {
* LPAIF_WSA = 2,
* LPAIF_VA = 3,
* LPAIF_AXI = 4
+ * Possible values for MI2S
+ * I2S_INTF_TYPE_PRIMARY = 0,
+ * I2S_INTF_TYPE_SECONDARY = 1,
+ * I2S_INTF_TYPE_TERTIARY = 2,
+ * I2S_INTF_TYPE_QUATERNARY = 3,
+ * I2S_INTF_TYPE_QUINARY = 4,
*
* %AR_TKN_U32_MODULE_FMT_INTERLEAVE: PCM Interleaving
* PCM_INTERLEAVED = 1,
@@ -184,8 +192,8 @@ enum ar_event_types {
#define AR_TKN_U32_MODULE_INSTANCE_ID 201
#define AR_TKN_U32_MODULE_MAX_IP_PORTS 202
#define AR_TKN_U32_MODULE_MAX_OP_PORTS 203
-#define AR_TKN_U32_MODULE_IN_PORTS 204
-#define AR_TKN_U32_MODULE_OUT_PORTS 205
+#define AR_TKN_U32_MODULE_IN_PORTS 204 /* deprecated */
+#define AR_TKN_U32_MODULE_OUT_PORTS 205 /* deprecated */
#define AR_TKN_U32_MODULE_SRC_OP_PORT_ID 206
#define AR_TKN_U32_MODULE_DST_IN_PORT_ID 207
#define AR_TKN_U32_MODULE_SRC_INSTANCE_ID 208
@@ -232,4 +240,12 @@ enum ar_event_types {
#define AR_TKN_U32_MODULE_LOG_TAP_POINT_ID 260
#define AR_TKN_U32_MODULE_LOG_MODE 261
+#define SND_SOC_AR_TPLG_MODULE_CFG_TYPE 0x01001006
+struct audioreach_module_priv_data {
+ __le32 size; /* size in bytes of the array, including all elements */
+ __le32 type; /* SND_SOC_AR_TPLG_MODULE_CFG_TYPE */
+ __le32 priv[2]; /* Private data for future expansion */
+	__le32 data[];	/* config data */
+};
+
#endif /* __SND_AR_TOKENS_H__ */
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index c28c766270de..9ce72fbd6f11 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -106,6 +106,8 @@
*/
#define SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME 417
+#define SOF_TKN_COMP_SCHED_DOMAIN 418
+
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
#define SOF_TKN_INTEL_SSP_MCLK_ID 501
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 72fd385037a6..245a6a829ce9 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -653,21 +653,4 @@ struct ufs_dev_info {
bool hid_sup;
};
-/*
- * This enum is used in string mapping in ufs_trace.h.
- */
-enum ufs_trace_str_t {
- UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
- UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
- UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
-};
-
-/*
- * Transaction Specific Fields (TSF) type in the UPIU package, this enum is
- * used in ufs_trace.h for UFS command trace.
- */
-enum ufs_trace_tsf_t {
- UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
-};
-
#endif /* End of Header */
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index f52de5ed1b3b..83563247c36c 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -113,4 +113,7 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12)
+/* Some UFS 4 devices do not support the qTimestamp attribute */
+#define UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT (1 << 13)
+
#endif /* UFS_QUIRKS_H_ */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 1d3943777584..9425cfd9d00e 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -167,13 +167,13 @@ struct ufs_pm_lvl_states {
* @task_tag: Task tag of the command
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @req_abort_skip: skip request abort task flag
* @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
* @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
* @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
* @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
- * @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -193,6 +193,7 @@ struct ufshcd_lrb {
int task_tag;
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
+ bool req_abort_skip;
ktime_t issue_time_stamp;
u64 issue_time_stamp_local_clock;
ktime_t compl_time_stamp;
@@ -201,8 +202,6 @@ struct ufshcd_lrb {
int crypto_key_slot;
u64 data_unit_num;
#endif
-
- bool req_abort_skip;
};
/**
@@ -795,30 +794,6 @@ struct ufs_hba_monitor {
};
/**
- * struct ufshcd_res_info_t - MCQ related resource regions
- *
- * @name: resource name
- * @resource: pointer to resource region
- * @base: register base address
- */
-struct ufshcd_res_info {
- const char *name;
- struct resource *resource;
- void __iomem *base;
-};
-
-enum ufshcd_res {
- RES_UFS,
- RES_MCQ,
- RES_MCQ_SQD,
- RES_MCQ_SQIS,
- RES_MCQ_CQD,
- RES_MCQ_CQIS,
- RES_MCQ_VS,
- RES_MAX,
-};
-
-/**
* struct ufshcd_mcq_opr_info_t - Operation and Runtime registers
*
* @offset: Doorbell Address Offset
@@ -963,6 +938,7 @@ enum ufshcd_mcq_opr {
* @ufs_rtc_update_work: A work for UFS RTC periodic update
* @pm_qos_req: PM QoS request handle
* @pm_qos_enabled: flag to check if pm qos is enabled
+ * @pm_qos_mutex: synchronizes PM QoS request and status updates
* @critical_health_count: count of critical health exceptions
* @dev_lvl_exception_count: count of device level exceptions since last reset
* @dev_lvl_exception_id: vendor specific information about the
@@ -1127,7 +1103,6 @@ struct ufs_hba {
bool lsdb_sup;
bool mcq_enabled;
bool mcq_esi_enabled;
- struct ufshcd_res_info res[RES_MAX];
void __iomem *mcq_base;
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
@@ -1136,6 +1111,8 @@ struct ufs_hba {
struct delayed_work ufs_rtc_update_work;
struct pm_qos_request pm_qos_req;
bool pm_qos_enabled;
+ /* synchronizes PM QoS request and status updates */
+ struct mutex pm_qos_mutex;
int critical_health_count;
atomic_t dev_lvl_exception_count;
@@ -1318,6 +1295,7 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
void ufshcd_enable_irq(struct ufs_hba *hba);
void ufshcd_disable_irq(struct ufs_hba *hba);
+void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
@@ -1508,5 +1486,6 @@ int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
const u16 *other_mask, u16 set, u16 clr);
+void ufshcd_force_error_recovery(struct ufs_hba *hba);
#endif /* End of Header */
diff --git a/include/video/pixel_format.h b/include/video/pixel_format.h
index b5104b2a3a13..6874754b0474 100644
--- a/include/video/pixel_format.h
+++ b/include/video/pixel_format.h
@@ -20,6 +20,9 @@ struct pixel_format {
};
};
+#define PIXEL_FORMAT_C8 \
+ { 8, true, { .index = {0, 8}, } }
+
#define PIXEL_FORMAT_XRGB1555 \
{ 16, false, { .alpha = {0, 0}, .red = {10, 5}, .green = {5, 5}, .blue = {0, 5} } }
@@ -38,4 +41,62 @@ struct pixel_format {
#define PIXEL_FORMAT_XRGB2101010 \
{ 32, false, { .alpha = {0, 0}, .red = {20, 10}, .green = {10, 10}, .blue = {0, 10} } }
+#define __pixel_format_cmp_field(lhs, rhs, name) \
+ { \
+ int ret = ((lhs)->name) - ((rhs)->name); \
+ if (ret) \
+ return ret; \
+ }
+
+#define __pixel_format_cmp_bitfield(lhs, rhs, name) \
+ { \
+ __pixel_format_cmp_field(lhs, rhs, name.offset); \
+ __pixel_format_cmp_field(lhs, rhs, name.length); \
+ }
+
+/**
+ * pixel_format_cmp - Compares two pixel-format descriptions
+ *
+ * @lhs: a pixel-format description
+ * @rhs: a pixel-format description
+ *
+ * Compares two pixel-format descriptions for their order. The semantics
+ * are equivalent to memcmp().
+ *
+ * Returns:
+ * 0 if both arguments describe the same pixel format, less-than-zero if lhs < rhs,
+ * or greater-than-zero if lhs > rhs.
+ */
+static inline int pixel_format_cmp(const struct pixel_format *lhs, const struct pixel_format *rhs)
+{
+ __pixel_format_cmp_field(lhs, rhs, bits_per_pixel);
+ __pixel_format_cmp_field(lhs, rhs, indexed);
+
+ if (lhs->indexed) {
+ __pixel_format_cmp_bitfield(lhs, rhs, index);
+ } else {
+ __pixel_format_cmp_bitfield(lhs, rhs, alpha);
+ __pixel_format_cmp_bitfield(lhs, rhs, red);
+ __pixel_format_cmp_bitfield(lhs, rhs, green);
+ __pixel_format_cmp_bitfield(lhs, rhs, blue);
+ }
+
+ return 0;
+}
+
+/**
+ * pixel_format_equal - Compares two pixel-format descriptions for equality
+ *
+ * @lhs: a pixel-format description
+ * @rhs: a pixel-format description
+ *
+ * Returns:
+ * True if both arguments describe the same pixel format, or false otherwise.
+ */
+static inline bool pixel_format_equal(const struct pixel_format *lhs,
+ const struct pixel_format *rhs)
+{
+ return !pixel_format_cmp(lhs, rhs);
+}
+
#endif
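
A quick use of the new comparison helpers, e.g. matching a probed format against a known layout (the probed pointer is illustrative; PIXEL_FORMAT_XRGB8888 is assumed to be among the format macros in this header):

    #include <video/pixel_format.h>

    static const struct pixel_format xrgb8888 = PIXEL_FORMAT_XRGB8888;

    static bool is_xrgb8888(const struct pixel_format *probed)
    {
            /* pixel_format_equal() is pixel_format_cmp() == 0 */
            return pixel_format_equal(probed, &xrgb8888);
    }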